/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Avi Kivity <avi@qumranet.com>
 * Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type.
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define NoBigReal ((u64)1 << 50) /* No big real mode */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * fastop functions are declared as taking a never-defined fastop parameter, * so they can't be called from C directly. */ struct fastop; struct opcode { u64 flags : 56; u64 intercept : 8; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; /* EFLAGS bit definitions. */ #define EFLG_ID (1<<21) #define EFLG_VIP (1<<20) #define EFLG_VIF (1<<19) #define EFLG_AC (1<<18) #define EFLG_VM (1<<17) #define EFLG_RF (1<<16) #define EFLG_IOPL (3<<12) #define EFLG_NT (1<<14) #define EFLG_OF (1<<11) #define EFLG_DF (1<<10) #define EFLG_IF (1<<9) #define EFLG_TF (1<<8) #define EFLG_SF (1<<7) #define EFLG_ZF (1<<6) #define EFLG_AF (1<<4) #define EFLG_PF (1<<2) #define EFLG_CF (1<<0) #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a #define EFLG_RESERVED_ONE_MASK 2 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) { if (!(ctxt->regs_valid & (1 << nr))) { ctxt->regs_valid |= 1 << nr; ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); } return ctxt->_regs[nr]; } static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) { ctxt->regs_valid |= 1 << nr; ctxt->regs_dirty |= 1 << nr; return &ctxt->_regs[nr]; } static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) { reg_read(ctxt, nr); return reg_write(ctxt, nr); } static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned reg; for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. 
*/ #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" #define FOP_START(op) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ FOP_ALIGN \ "em_" #op ": \n\t" #define FOP_END \ ".popsection") #define FOPNOP() FOP_ALIGN FOP_RET #define FOP1E(op, dst) \ FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, 
VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) { return address_mask(ctxt, reg); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) { ulong mask; if (ctxt->ad_bytes == sizeof(unsigned long)) mask = ~0UL; else mask = ad_mask(ctxt); masked_increment(reg, mask, inc); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? (limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { WARN_ON(vec > 0x1f); ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, int cs_l) { switch (ctxt->op_bytes) { case 2: ctxt->_eip = (u16)dst; break; case 4: ctxt->_eip = (u32)dst; break; case 8: if ((cs_l && is_noncanonical_address(dst)) || (!cs_l && (dst & ~(u32)-1))) return emulate_gp(ctxt, 0); ctxt->_eip = dst; break; default: WARN(1, "unsupported eip assignment size\n"); } return X86EMUL_CONTINUE; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); 
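	/* Keep the currently cached descriptor and base; only the visible selector value changes. */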
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. */ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) { if (likely(size < 16)) return false; if (ctxt->d & Aligned) return true; else if (ctxt->d & Unaligned) return false; else if (ctxt->d & Avx) return false; else return true; } static int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, bool fetch, ulong *linear) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; unsigned cpl; la = seg_base(ctxt, addr.seg) + addr.ea; switch (ctxt->mode) { case X86EMUL_MODE_PROT64: if (((signed long)la << 16) >> 16 != la) return emulate_gp(ctxt, 0); break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && write) goto bad; /* unreadable code segment */ if (!fetch && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && (ctxt->d & NoBigReal)) { /* la is between zero and 0xffff */ if (la > 0xffff || (u32)(la + size - 1) > 0xffff) goto bad; } else if ((desc.type & 8) || !(desc.type & 4)) { /* expand-up segment */ if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) goto bad; } else { /* expand-down segment */ if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) goto bad; } cpl = ctxt->ops->cpl(ctxt); if (!(desc.type & 8)) { /* data segment */ if (cpl > desc.dpl) goto bad; } else if ((desc.type & 8) && !(desc.type & 4)) { /* nonconforming code segment */ if (cpl != desc.dpl) goto bad; } else if ((desc.type & 8) && (desc.type & 4)) { /* conforming code segment */ if (cpl < desc.dpl) goto bad; } break; } if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) la &= (u32)-1; if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) return emulate_gp(ctxt, 0); *linear = la; return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, sel); else return emulate_gp(ctxt, sel); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { return __linearize(ctxt, addr, size, write, false, linear); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. 
*/ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; size = 15UL ^ cur_size; rc = __linearize(ctxt, addr, size, false, true, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. */ if (unlikely(size < op_size)) return X86EMUL_UNHANDLEABLE; rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; if (unlikely(done_size < size)) return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 
*/ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break; case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; case 9: asm("movdqa 
%0, %%xmm9" : : "m"(*data)); break; case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fninit"); ctxt->ops->put_fpu(ctxt); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstcw %0": "+m"(fcw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstsw %0": "+m"(fsw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; read_sse_reg(ctxt, &op->vec_val, reg); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 5) modrm_ea += insn_fetch(s32, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea += (sv >> 3); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; WARN_ON((mc->end + size) >= sizeof(mc->data)); rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & EFLG_DF) ? 
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & EFLG_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset (dt, 0, sizeof *dt); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); *desc_addr_p = addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL); } static void write_register_operand(struct operand *op) { /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. 
*/ switch (op->bytes) { case 1: *(u8 *)op->addr.reg = (u8)op->val; break; case 2: *(u16 *)op->addr.reg = (u16)op->val; break; case 4: *op->addr.reg = (u32)op->val; break; /* 64b: zero-extend */ case 8: *op->addr.reg = op->val; break; } } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); break; case OP_XMM: write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); break; case OP_MM: write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; rsp_increment(ctxt, -bytes); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= EFLG_IOPL; if (cpl <= iopl) change_mask |= EFLG_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= EFLG_IF; break; default: /* real mode */ change_mask |= (EFLG_IOPL | EFLG_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), stack_mask(ctxt)); return emulate_pop(ctxt, reg_rmw(ctxt, 
VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector; int rc; rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 
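	/* The popped CS selector is only loaded into CS after EFLAGS has been popped below. */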
if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = temp_eip; if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt->eflags |= EFLG_RESERVED_ONE_MASK; return rc; } static int em_iret(struct x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* iret from protected mode unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel, old_sel; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; u8 cpl = ctxt->ops->cpl(ctxt); /* Assignment of RIP may only fail in 64-bit mode */ if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); if (rc != X86EMUL_CONTINUE) { WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; } return rc; } static int em_grp45(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; switch (ctxt->modrm_reg) { case 2: /* call near abs */ { long int old_eip; old_eip = ctxt->_eip; rc = assign_eip_near(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) break; ctxt->src.val = old_eip; rc = em_push(ctxt); break; } case 4: /* jmp abs */ rc = assign_eip_near(ctxt, ctxt->src.val); break; case 5: /* jmp far */ rc = em_jmp_far(ctxt); break; case 6: /* push */ rc = em_push(ctxt); break; } return rc; } static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) { u64 old = ctxt->dst.orig_val64; if (ctxt->dst.bytes == 16) return X86EMUL_UNHANDLEABLE; if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); ctxt->eflags |= EFLG_ZF; } return X86EMUL_CONTINUE; } static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return 
X86EMUL_UNHANDLEABLE; rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, new_desc.l); if (rc != X86EMUL_CONTINUE) { WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; } static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) { int rc; rc = em_ret_far(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) { /* Save real source value, then compare EAX against destination. */ ctxt->dst.orig_val = ctxt->dst.val; ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); if (ctxt->eflags & EFLG_ZF) { /* Success: write back to memory. */ ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. */ ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt->dst.val = ctxt->dst.orig_val; } return X86EMUL_CONTINUE; } static int em_lseg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned short sel; int rc; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; ctxt->dst.val = ctxt->src.val; return rc; } static void setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, struct desc_struct *cs, struct desc_struct *ss) { cs->l = 0; /* will be adjusted later */ set_desc_base(cs, 0); /* flat segment */ cs->g = 1; /* 4kb granularity */ set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs->type = 0x0b; /* Read, Execute, Accessed */ cs->s = 1; cs->dpl = 0; /* will be adjusted later */ cs->p = 1; cs->d = 1; cs->avl = 0; set_desc_base(ss, 0); /* flat segment */ set_desc_limit(ss, 0xfffff); /* 4GB limit */ ss->g = 1; /* 4kb granularity */ ss->s = 1; ss->type = 0x03; /* Read/Write, Accessed */ ss->d = 1; /* 32bit stack segment */ ss->dpl = 0; ss->p = 1; ss->l = 0; ss->avl = 0; } static bool vendor_intel(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; } static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 eax, ebx, ecx, edx; /* * syscall should always be enabled in longmode - so only become * vendor specific (cpuid) if other modes are active... */ if (ctxt->mode == X86EMUL_MODE_PROT64) return true; eax = 0x00000000; ecx = 0x00000000; ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); /* * Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit * longmode. Also an 64bit guest with a * 32bit compat-app running will #UD !! While this * behaviour can be fixed (by emulating) into AMD * response - CPUs of AMD can't behave like Intel. 
*/ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) return false; /* AMD ("AuthenticAMD") */ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) return true; /* AMD ("AMDisbetter!") */ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... */ return false; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF); } return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* XXX sysenter/sysexit have not been tested in 64bit mode. * Therefore, we inject an #UD. 
*/ if (ctxt->mode == X86EMUL_MODE_PROT64) return emulate_ud(ctxt); setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if 
(!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return 
ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */ tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. */ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. 
If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; } return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; ulong desc_addr; /* FIXME: old_tss_base == ~0 ? */ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; /* FIXME: check that next_tss_desc is tss */ /* * Check privileges. The three cases are task switch caused by... * * 1. jmp/call/int to task gate: Check against DPL of the task gate * 2. Exception/IRQ/iret: No check is performed * 3.
jmp/call to TSS: Check against DPL of the TSS */ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } else if (reason != TASK_SWITCH_IRET) { int dpl = next_tss_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, tss_selector); } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* set back link to prev task only if NT bit is set in eflags note that old_tss_sel is not used after this point */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { int df = (ctxt->eflags & EFLG_DF) ? 
-op->count : op->count; register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes); op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg)); } static int em_das(struct x86_emulate_ctxt *ctxt) { u8 al, old_al; bool af, cf, old_cf; cf = ctxt->eflags & X86_EFLAGS_CF; al = ctxt->dst.val; old_al = al; old_cf = cf; cf = false; af = ctxt->eflags & X86_EFLAGS_AF; if ((al & 0x0f) > 9 || af) { al -= 6; cf = old_cf | (al >= 250); af = true; } else { af = false; } if (old_al > 0x99 || old_cf) { al -= 0x60; cf = true; } ctxt->dst.val = al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; if (af) ctxt->eflags |= X86_EFLAGS_AF; return X86EMUL_CONTINUE; } static int em_aam(struct x86_emulate_ctxt *ctxt) { u8 al, ah; if (ctxt->src.val == 0) return emulate_de(ctxt); al = ctxt->dst.val & 0xff; ah = al / ctxt->src.val; al %= ctxt->src.val; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; u8 ah = (ctxt->dst.val >> 8) & 0xff; al = (al + (ah * ctxt->src.val)) & 0xff; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_call(struct x86_emulate_ctxt *ctxt) { int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; rc = jmp_rel(ctxt, rel); if (rc != X86EMUL_CONTINUE) return rc; return em_push(ctxt); } static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; int cpl = ctxt->ops->cpl(ctxt); old_eip = ctxt->_eip; ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return X86EMUL_CONTINUE; rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_eip; rc = em_push(ctxt); /* If we failed, we tainted the memory, but the very least we should restore cs */ if (rc != X86EMUL_CONTINUE) goto fail; return rc; fail: ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); return rc; } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ ctxt->src.val = ctxt->dst.val; write_register_operand(&ctxt->src); /* Write back the memory destination with implicit LOCK prefix. 
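* XCHG with a memory operand always asserts the bus lock on hardware, so lock_prefix is forced below even when the guest did not encode an explicit LOCK prefix.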
*/ ctxt->dst.val = ctxt->src.orig_val; ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.bytes = ctxt->src.bytes; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; return X86EMUL_CONTINUE; } static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); return X86EMUL_CONTINUE; } #define FFL(x) bit(X86_FEATURE_##x) static int em_movbe(struct x86_emulate_ctxt *ctxt) { u32 ebx, ecx, edx, eax = 1; u16 tmp; /* * Check MOVBE is set in the guest-visible CPUID leaf. */ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); if (!(ecx & FFL(MOVBE))) return emulate_ud(ctxt); switch (ctxt->op_bytes) { case 2: /* * From MOVBE definition: "...When the operand size is 16 bits, * the upper word of the destination register remains unchanged * ..." * * Both casting ->valptr and ->val to u16 breaks strict aliasing * rules so we have to do the operation almost per hand. */ tmp = (u16)ctxt->src.val; ctxt->dst.val &= ~0xffffUL; ctxt->dst.val |= (unsigned long)swab16(tmp); break; case 4: ctxt->dst.val = swab32((u32)ctxt->src.val); break; case 8: ctxt->dst.val = swab64(ctxt->src.val); break; default: BUG(); } return X86EMUL_CONTINUE; } static int em_cr_write(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_dr_write(struct x86_emulate_ctxt *ctxt) { unsigned long val; if (ctxt->mode == X86EMUL_MODE_PROT64) val = ctxt->src.val & ~0ULL; else val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) return emulate_gp(ctxt, 0); /* Disable writeback. 
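* MOV to a debug register has no general-purpose destination; the DR itself was already updated via set_dr() above.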
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); return X86EMUL_CONTINUE; } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_vmcall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. */ ctxt->dst.type = OP_NONE; return segmented_write(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_gdt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_vmmcall(struct x86_emulate_ctxt *ctxt) { int rc; rc = ctxt->ops->fix_hypercall(ctxt); /* Disable writeback. 
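* fix_hypercall() is expected to patch the guest's hypercall opcode to the flavour the underlying hardware uses; VMMCALL itself has no destination operand to write back.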
*/ ctxt->dst.type = OP_NONE; return rc; } static int em_lidt(struct x86_emulate_ctxt *ctxt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_cli(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->eflags &= ~X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_sti(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; *reg_write(ctxt, VCPU_REGS_RDX) = edx; return X86EMUL_CONTINUE; } static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; ctxt->eflags |= flags | X86_EFLAGS_FIXED; return X86EMUL_CONTINUE; } static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; return X86EMUL_CONTINUE; } static int em_bswap(struct x86_emulate_ctxt *ctxt) { switch (ctxt->op_bytes) { #ifdef CONFIG_X86_64 case 8: asm("bswap %0" : "+r"(ctxt->dst.val)); break; #endif default: asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); break; } return X86EMUL_CONTINUE; } static bool valid_cr(int nr) { switch (nr) { case 0: case 2 ... 
4: case 8: return true; default: return false; } } static int check_cr_read(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_cr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int cr = ctxt->modrm_reg; u64 efer = 0; static u64 cr_reserved_bits[] = { 0xffffffff00000000ULL, 0, 0, 0, /* CR3 checked later */ CR4_RESERVED_BITS, 0, 0, 0, CR8_RESERVED_BITS, }; if (!valid_cr(cr)) return emulate_ud(ctxt); if (new_val & cr_reserved_bits[cr]) return emulate_gp(ctxt, 0); switch (cr) { case 0: { u64 cr4; if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) return emulate_gp(ctxt, 0); cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && !(cr4 & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } case 3: { u64 rsvd = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) rsvd = CR3_L_MODE_RESERVED_BITS; if (new_val & rsvd) return emulate_gp(ctxt, 0); break; } case 4: { ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } } return X86EMUL_CONTINUE; } static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) { unsigned long dr7; ctxt->ops->get_dr(ctxt, 7, &dr7); /* Check if DR7.Global_Enable is set */ return dr7 & (1 << 13); } static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) return emulate_ud(ctxt); cr4 = ctxt->ops->get_cr(ctxt, 4); if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); if (check_dr7_gd(ctxt)) return emulate_db(ctxt); return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? 
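* RAX must hold the physical address of the VMCB; anything with bits 63:48 set is rejected as a conservative bound on the supported physical address width.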
*/ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_vmcall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, }; static const struct opcode 
group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | Stack, em_grp45), I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), I(SrcMem | Stack, em_grp45), I(SrcMemFAddr | ImplicitOps, em_grp45), I(SrcMem | Stack, em_grp45), D(Undefined), }; static const struct opcode group6[] = { DI(Prot, sldt), DI(Prot, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, N, } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct gprefix pfx_0f_2b = { I(0, em_mov), I(0, em_mov), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { N, I(Sse, em_mov), N, N, }; static const struct escape escape_d9 = { { N, N, N, N, N, N, N, I(DstMem, em_fnstcw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_db = { { N, N, N, N, N, N, N, N, }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_dd = { { N, N, N, N, N, N, N, I(DstMem, em_fnstsw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, 
/* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ X8(I(DstReg | Stack, em_pop)), /* 0x60 - 0x67 */ I(ImplicitOps | Stack | No64, em_pusha), I(ImplicitOps | Stack | No64, em_popa), N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , N, N, N, N, /* 0x68 - 0x6F */ I(SrcImm | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), D(ModRM | SrcMem | NoAccess | DstReg), I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), F2bv(SrcSI | DstDI | String | NoWrite, em_cmp), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), I(ImplicitOps | Stack, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, 
em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte, em_loop)), I(SrcImmByte, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps), I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, 
em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, D(DstMem | SrcReg | ModRM | Mov), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct gprefix three_byte_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N, N, N }; static const struct gprefix three_byte_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N, N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. */ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI)); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->val = VCPU_SREG_ES; break; case OpCS: op->val = VCPU_SREG_CS; break; case OpSS: op->val = VCPU_SREG_SS; break; case OpDS: op->val = VCPU_SREG_DS; break; case OpFS: op->val = VCPU_SREG_FS; break; case OpGS: op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. 
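* OP_NONE also keeps x86_emulate_insn() from reading or writing an operand that was never decoded.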
*/ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? */ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 
0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) ctxt->op_bytes = 8; if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea += ctxt->_eip; done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. 
Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { bool fault = false; ctxt->ops->get_fpu(ctxt); asm volatile("1: fwait \n\t" "2: \n\t" ".pushsection .fixup,\"ax\" \n\t" "3: \n\t" "movb $1, %[fault] \n\t" "jmp 2b \n\t" ".popsection \n\t" _ASM_EXTABLE(1b, 3b) : [fault]"+qm"(fault)); ctxt->ops->put_fpu(ctxt); if (unlikely(fault)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { if (op->type == OP_MM) read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop) : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->rip_relative, 0, (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. 
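* Only MM-register operands are fetched here; memory operands still go through the normal segmented_read() path later in this function.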
*/ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 
0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= EFLG_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~EFLG_CF; break; case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; case 0xfd: /* std */ ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 
0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xae: /* clflush */ break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; case 0xc3: /* movnti */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : (u32) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_2322_0
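The record above ends with the emulator's string-instruction handling, and the comment feeding string_insn_completed() spells out when a repeated CMPS/SCAS terminates early (REPE/REPZ stops once ZF is clear, REPNE/REPNZ once ZF is set). The stand-alone C sketch below merely restates that predicate so it can be compiled and tested in user space; it is illustrative only, and the MY_-prefixed names and rep_cmps_scas_done() are invented stand-ins for ctxt->b, ctxt->rep_prefix and EFLG_ZF, not symbols from emulate.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_EFLG_ZF (1u << 6)	/* ZF lives in bit 6 of EFLAGS */

enum my_rep_prefix { MY_REPE_PREFIX, MY_REPNE_PREFIX };

/*
 * Illustrative restatement of the termination test described in the
 * comment above: CMPS (0xa6/0xa7) and SCAS (0xae/0xaf) under REPE stop
 * when ZF == 0, and under REPNE stop when ZF == 1.
 */
static bool rep_cmps_scas_done(uint8_t opcode, enum my_rep_prefix prefix,
			       uint32_t eflags)
{
	bool cmps_or_scas = opcode == 0xa6 || opcode == 0xa7 ||
			    opcode == 0xae || opcode == 0xaf;

	if (!cmps_or_scas)
		return false;
	if (prefix == MY_REPE_PREFIX)
		return (eflags & MY_EFLG_ZF) == 0;
	return (eflags & MY_EFLG_ZF) != 0;	/* REPNE/REPNZ */
}

int main(void)
{
	/* repe cmpsb, ZF clear: the last compare differed, so we are done. */
	printf("%d\n", rep_cmps_scas_done(0xa6, MY_REPE_PREFIX, 0));
	/* repne scasb, ZF clear: no match found yet, keep iterating. */
	printf("%d\n", rep_cmps_scas_done(0xae, MY_REPNE_PREFIX, 0));
	return 0;
}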
crossvul-cpp_data_good_5397_0
/* * /proc/sys support */ #include <linux/init.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/printk.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/module.h> #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; static const struct file_operations proc_sys_file_operations; static const struct inode_operations proc_sys_inode_operations; static const struct file_operations proc_sys_dir_file_operations; static const struct inode_operations proc_sys_dir_operations; /* Support for permanently empty directories */ struct ctl_table sysctl_mount_point[] = { { } }; static bool is_empty_dir(struct ctl_table_header *head) { return head->ctl_table[0].child == sysctl_mount_point; } static void set_empty_dir(struct ctl_dir *dir) { dir->header.ctl_table[0].child = sysctl_mount_point; } static void clear_empty_dir(struct ctl_dir *dir) { dir->header.ctl_table[0].child = NULL; } void proc_sys_poll_notify(struct ctl_table_poll *poll) { if (!poll) return; atomic_inc(&poll->event); wake_up_interruptible(&poll->wait); } static struct ctl_table root_table[] = { { .procname = "", .mode = S_IFDIR|S_IRUGO|S_IXUGO, }, { } }; static struct ctl_table_root sysctl_table_root = { .default_set.dir.header = { {{.count = 1, .nreg = 1, .ctl_table = root_table }}, .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }, }; static DEFINE_SPINLOCK(sysctl_lock); static void drop_sysctl_table(struct ctl_table_header *header); static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry); static int insert_links(struct ctl_table_header *head); static void put_links(struct ctl_table_header *header); static void sysctl_print_dir(struct ctl_dir *dir) { if (dir->header.parent) sysctl_print_dir(dir->header.parent); pr_cont("%s/", dir->header.ctl_table[0].procname); } static int namecmp(const char *name1, int len1, const char *name2, int len2) { int minlen; int cmp; minlen = len1; if (minlen > len2) minlen = len2; cmp = memcmp(name1, name2, minlen); if (cmp == 0) cmp = len1 - len2; return cmp; } /* Called under sysctl_lock */ static struct ctl_table *find_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; struct rb_node *node = dir->root.rb_node; while (node) { struct ctl_node *ctl_node; const char *procname; int cmp; ctl_node = rb_entry(node, struct ctl_node, node); head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; procname = entry->procname; cmp = namecmp(name, namelen, procname, strlen(procname)); if (cmp < 0) node = node->rb_left; else if (cmp > 0) node = node->rb_right; else { *phead = head; return entry; } } return NULL; } static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; struct rb_node **p = &head->parent->root.rb_node; struct rb_node *parent = NULL; const char *name = entry->procname; int namelen = strlen(name); while (*p) { struct ctl_table_header *parent_head; struct ctl_table *parent_entry; struct ctl_node *parent_node; const char *parent_name; int cmp; parent = *p; parent_node = rb_entry(parent, struct ctl_node, node); parent_head = parent_node->header; parent_entry = &parent_head->ctl_table[parent_node - parent_head->node]; parent_name = parent_entry->procname; cmp = namecmp(name, 
namelen, parent_name, strlen(parent_name)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { pr_err("sysctl duplicate entry: "); sysctl_print_dir(head->parent); pr_cont("/%s\n", entry->procname); return -EEXIST; } } rb_link_node(node, parent, p); rb_insert_color(node, &head->parent->root); return 0; } static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; rb_erase(node, &head->parent->root); } static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, struct ctl_node *node, struct ctl_table *table) { head->ctl_table = table; head->ctl_table_arg = table; head->used = 0; head->count = 1; head->nreg = 1; head->unregistering = NULL; head->root = root; head->set = set; head->parent = NULL; head->node = node; if (node) { struct ctl_table *entry; for (entry = table; entry->procname; entry++, node++) node->header = head; } } static void erase_header(struct ctl_table_header *head) { struct ctl_table *entry; for (entry = head->ctl_table; entry->procname; entry++) erase_entry(head, entry); } static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { struct ctl_table *entry; int err; /* Is this a permanently empty directory? */ if (is_empty_dir(&dir->header)) return -EROFS; /* Am I creating a permanently empty directory? */ if (header->ctl_table == sysctl_mount_point) { if (!RB_EMPTY_ROOT(&dir->root)) return -EINVAL; set_empty_dir(dir); } dir->header.nreg++; header->parent = dir; err = insert_links(header); if (err) goto fail_links; for (entry = header->ctl_table; entry->procname; entry++) { err = insert_entry(header, entry); if (err) goto fail; } return 0; fail: erase_header(header); put_links(header); fail_links: if (header->ctl_table == sysctl_mount_point) clear_empty_dir(dir); header->parent = NULL; drop_sysctl_table(&dir->header); return err; } /* called under sysctl_lock */ static int use_table(struct ctl_table_header *p) { if (unlikely(p->unregistering)) return 0; p->used++; return 1; } /* called under sysctl_lock */ static void unuse_table(struct ctl_table_header *p) { if (!--p->used) if (unlikely(p->unregistering)) complete(p->unregistering); } /* called under sysctl_lock, will reacquire if has to wait */ static void start_unregistering(struct ctl_table_header *p) { /* * if p->used is 0, nobody will ever touch that entry again; * we'll eliminate all paths to it before dropping sysctl_lock */ if (unlikely(p->used)) { struct completion wait; init_completion(&wait); p->unregistering = &wait; spin_unlock(&sysctl_lock); wait_for_completion(&wait); spin_lock(&sysctl_lock); } else { /* anything non-NULL; we'll never dereference it */ p->unregistering = ERR_PTR(-EINVAL); } /* * do not remove from the list until nobody holds it; walking the * list in do_sysctl() relies on that. 
*/ erase_header(p); } static void sysctl_head_get(struct ctl_table_header *head) { spin_lock(&sysctl_lock); head->count++; spin_unlock(&sysctl_lock); } void sysctl_head_put(struct ctl_table_header *head) { spin_lock(&sysctl_lock); if (!--head->count) kfree_rcu(head, rcu); spin_unlock(&sysctl_lock); } static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) { BUG_ON(!head); spin_lock(&sysctl_lock); if (!use_table(head)) head = ERR_PTR(-ENOENT); spin_unlock(&sysctl_lock); return head; } static void sysctl_head_finish(struct ctl_table_header *head) { if (!head) return; spin_lock(&sysctl_lock); unuse_table(head); spin_unlock(&sysctl_lock); } static struct ctl_table_set * lookup_header_set(struct ctl_table_root *root) { struct ctl_table_set *set = &root->default_set; if (root->lookup) set = root->lookup(root); return set; } static struct ctl_table *lookup_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; spin_lock(&sysctl_lock); entry = find_entry(&head, dir, name, namelen); if (entry && use_table(head)) *phead = head; else entry = NULL; spin_unlock(&sysctl_lock); return entry; } static struct ctl_node *first_usable_entry(struct rb_node *node) { struct ctl_node *ctl_node; for (;node; node = rb_next(node)) { ctl_node = rb_entry(node, struct ctl_node, node); if (use_table(ctl_node->header)) return ctl_node; } return NULL; } static void first_entry(struct ctl_dir *dir, struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = NULL; struct ctl_table *entry = NULL; struct ctl_node *ctl_node; spin_lock(&sysctl_lock); ctl_node = first_usable_entry(rb_first(&dir->root)); spin_unlock(&sysctl_lock); if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = *phead; struct ctl_table *entry = *pentry; struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; spin_lock(&sysctl_lock); unuse_table(head); ctl_node = first_usable_entry(rb_next(&ctl_node->node)); spin_unlock(&sysctl_lock); head = NULL; if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } void register_sysctl_root(struct ctl_table_root *root) { } /* * sysctl_perm does NOT grant the superuser all rights automatically, because * some sysctl variables are readonly even to root. 
*/ static int test_perm(int mode, int op) { if (uid_eq(current_euid(), GLOBAL_ROOT_UID)) mode >>= 6; else if (in_egroup_p(GLOBAL_ROOT_GID)) mode >>= 3; if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0) return 0; return -EACCES; } static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) { struct ctl_table_root *root = head->root; int mode; if (root->permissions) mode = root->permissions(head, table); else mode = table->mode; return test_perm(mode, op); } static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { struct ctl_table_root *root = head->root; struct inode *inode; struct proc_inode *ei; inode = new_inode(sb); if (!inode) goto out; inode->i_ino = get_next_ino(); sysctl_head_get(head); ei = PROC_I(inode); ei->sysctl = head; ei->sysctl_entry = table; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); inode->i_mode = table->mode; if (!S_ISDIR(table->mode)) { inode->i_mode |= S_IFREG; inode->i_op = &proc_sys_inode_operations; inode->i_fop = &proc_sys_file_operations; } else { inode->i_mode |= S_IFDIR; inode->i_op = &proc_sys_dir_operations; inode->i_fop = &proc_sys_dir_file_operations; if (is_empty_dir(head)) make_empty_dir_inode(inode); } if (root->set_ownership) root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); out: return inode; } static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; if (!head) head = &sysctl_table_root.default_set.dir.header; return sysctl_head_grab(head); } static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ctl_table_header *head = grab_header(dir); struct ctl_table_header *h = NULL; const struct qstr *name = &dentry->d_name; struct ctl_table *p; struct inode *inode; struct dentry *err = ERR_PTR(-ENOENT); struct ctl_dir *ctl_dir; int ret; if (IS_ERR(head)) return ERR_CAST(head); ctl_dir = container_of(head, struct ctl_dir, header); p = lookup_entry(&h, ctl_dir, name->name, name->len); if (!p) goto out; if (S_ISLNK(p->mode)) { ret = sysctl_follow_link(&h, &p); err = ERR_PTR(ret); if (ret) goto out; } err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); if (!inode) goto out; err = NULL; d_set_d_op(dentry, &proc_sys_dentry_operations); d_add(dentry, inode); out: if (h) sysctl_head_finish(h); sysctl_head_finish(head); return err; } static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, size_t count, loff_t *ppos, int write) { struct inode *inode = file_inode(filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; ssize_t error; size_t res; if (IS_ERR(head)) return PTR_ERR(head); /* * At this point we know that the sysctl was not unregistered * and won't be until we finish. */ error = -EPERM; if (sysctl_perm(head, table, write ? 
MAY_WRITE : MAY_READ)) goto out; /* if that can happen at all, it should be -EINVAL, not -EISDIR */ error = -EINVAL; if (!table->proc_handler) goto out; /* careful: calling conventions are nasty here */ res = count; error = table->proc_handler(table, write, buf, &res, ppos); if (!error) error = res; out: sysctl_head_finish(head); return error; } static ssize_t proc_sys_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0); } static ssize_t proc_sys_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1); } static int proc_sys_open(struct inode *inode, struct file *filp) { struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; /* sysctl was unregistered */ if (IS_ERR(head)) return PTR_ERR(head); if (table->poll) filp->private_data = proc_sys_poll_event(table->poll); sysctl_head_finish(head); return 0; } static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) { struct inode *inode = file_inode(filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; unsigned int ret = DEFAULT_POLLMASK; unsigned long event; /* sysctl was unregistered */ if (IS_ERR(head)) return POLLERR | POLLHUP; if (!table->proc_handler) goto out; if (!table->poll) goto out; event = (unsigned long)filp->private_data; poll_wait(filp, &table->poll->wait, wait); if (event != atomic_read(&table->poll->event)) { filp->private_data = proc_sys_poll_event(table->poll); ret = POLLIN | POLLRDNORM | POLLERR | POLLPRI; } out: sysctl_head_finish(head); return ret; } static bool proc_sys_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { struct dentry *child, *dir = file->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; unsigned type = DT_UNKNOWN; qname.name = table->procname; qname.len = strlen(table->procname); qname.hash = full_name_hash(dir, qname.name, qname.len); child = d_lookup(dir, &qname); if (!child) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) return false; if (d_in_lookup(child)) { inode = proc_sys_make_inode(dir->d_sb, head, table); if (!inode) { d_lookup_done(child); dput(child); return false; } d_set_d_op(child, &proc_sys_dentry_operations); d_add(child, inode); } } inode = d_inode(child); ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); return dir_emit(ctx, qname.name, qname.len, ino, type); } static bool proc_sys_link_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { bool ret = true; head = sysctl_head_grab(head); if (S_ISLNK(table->mode)) { /* It is not an error if we can not follow the link ignore it */ int err = sysctl_follow_link(&head, &table); if (err) goto out; } ret = proc_sys_fill_cache(file, ctx, head, table); out: sysctl_head_finish(head); return ret; } static int scan(struct ctl_table_header *head, struct ctl_table *table, unsigned long *pos, struct file *file, struct dir_context *ctx) { bool res; if ((*pos)++ < ctx->pos) return true; if (unlikely(S_ISLNK(table->mode))) res = proc_sys_link_fill_cache(file, ctx, head, table); else res = proc_sys_fill_cache(file, ctx, head, table); if (res) ctx->pos = *pos; return res; } static int proc_sys_readdir(struct file *file, struct dir_context *ctx) { 
struct ctl_table_header *head = grab_header(file_inode(file)); struct ctl_table_header *h = NULL; struct ctl_table *entry; struct ctl_dir *ctl_dir; unsigned long pos; if (IS_ERR(head)) return PTR_ERR(head); ctl_dir = container_of(head, struct ctl_dir, header); if (!dir_emit_dots(file, ctx)) goto out; pos = 2; for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { if (!scan(h, entry, &pos, file, ctx)) { sysctl_head_finish(h); break; } } out: sysctl_head_finish(head); return 0; } static int proc_sys_permission(struct inode *inode, int mask) { /* * sysctl entries that are not writeable, * are _NOT_ writeable, capabilities or not. */ struct ctl_table_header *head; struct ctl_table *table; int error; /* Executable files are not allowed under /proc/sys/ */ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) return -EACCES; head = grab_header(inode); if (IS_ERR(head)) return PTR_ERR(head); table = PROC_I(inode)->sysctl_entry; if (!table) /* global root - r-xr-xr-x */ error = mask & MAY_WRITE ? -EACCES : 0; else /* Use the permissions on the sysctl table entry */ error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); sysctl_head_finish(head); return error; } static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error; if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) return -EPERM; error = setattr_prepare(dentry, attr); if (error) return error; setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = d_inode(dentry); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; if (IS_ERR(head)) return PTR_ERR(head); generic_fillattr(inode, stat); if (table) stat->mode = (stat->mode & S_IFMT) | table->mode; sysctl_head_finish(head); return 0; } static const struct file_operations proc_sys_file_operations = { .open = proc_sys_open, .poll = proc_sys_poll, .read = proc_sys_read, .write = proc_sys_write, .llseek = default_llseek, }; static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, .iterate_shared = proc_sys_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations proc_sys_inode_operations = { .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static const struct inode_operations proc_sys_dir_operations = { .lookup = proc_sys_lookup, .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; return !PROC_I(d_inode(dentry))->sysctl->unregistering; } static int proc_sys_delete(const struct dentry *dentry) { return !!PROC_I(d_inode(dentry))->sysctl->unregistering; } static int sysctl_is_seen(struct ctl_table_header *p) { struct ctl_table_set *set = p->set; int res; spin_lock(&sysctl_lock); if (p->unregistering) res = 0; else if (!set->is_seen) res = 1; else res = set->is_seen(set); spin_unlock(&sysctl_lock); return res; } static int proc_sys_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct ctl_table_header *head; struct inode *inode; /* Although proc doesn't have negative dentries, rcu-walk means * that inode here can be NULL */ /* AV: can it, indeed? 
*/ inode = d_inode_rcu(dentry); if (!inode) return 1; if (name->len != len) return 1; if (memcmp(name->name, str, len)) return 1; head = rcu_dereference(PROC_I(inode)->sysctl); return !head || !sysctl_is_seen(head); } static const struct dentry_operations proc_sys_dentry_operations = { .d_revalidate = proc_sys_revalidate, .d_delete = proc_sys_delete, .d_compare = proc_sys_compare, }; static struct ctl_dir *find_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; entry = find_entry(&head, dir, name, namelen); if (!entry) return ERR_PTR(-ENOENT); if (!S_ISDIR(entry->mode)) return ERR_PTR(-ENOTDIR); return container_of(head, struct ctl_dir, header); } static struct ctl_dir *new_dir(struct ctl_table_set *set, const char *name, int namelen) { struct ctl_table *table; struct ctl_dir *new; struct ctl_node *node; char *new_name; new = kzalloc(sizeof(*new) + sizeof(struct ctl_node) + sizeof(struct ctl_table)*2 + namelen + 1, GFP_KERNEL); if (!new) return NULL; node = (struct ctl_node *)(new + 1); table = (struct ctl_table *)(node + 1); new_name = (char *)(table + 2); memcpy(new_name, name, namelen); new_name[namelen] = '\0'; table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; init_header(&new->header, set->dir.header.root, set, node, table); return new; } /** * get_subdir - find or create a subdir with the specified name. * @dir: Directory to create the subdirectory in * @name: The name of the subdirectory to find or create * @namelen: The length of name * * Takes a directory with an elevated reference count so we know that * if we drop the lock the directory will not go away. Upon success * the reference is moved from @dir to the returned subdirectory. * Upon error an error code is returned and the reference on @dir is * simply dropped. */ static struct ctl_dir *get_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_set *set = dir->header.set; struct ctl_dir *subdir, *new = NULL; int err; spin_lock(&sysctl_lock); subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; spin_unlock(&sysctl_lock); new = new_dir(set, name, namelen); spin_lock(&sysctl_lock); subdir = ERR_PTR(-ENOMEM); if (!new) goto failed; /* Was the subdir added while we dropped the lock? */ subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; /* Nope. Use the our freshly made directory entry. 
*/ err = insert_header(dir, &new->header); subdir = ERR_PTR(err); if (err) goto failed; subdir = new; found: subdir->header.nreg++; failed: if (IS_ERR(subdir)) { pr_err("sysctl could not get directory: "); sysctl_print_dir(dir); pr_cont("/%*.*s %ld\n", namelen, namelen, name, PTR_ERR(subdir)); } drop_sysctl_table(&dir->header); if (new) drop_sysctl_table(&new->header); spin_unlock(&sysctl_lock); return subdir; } static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir) { struct ctl_dir *parent; const char *procname; if (!dir->header.parent) return &set->dir; parent = xlate_dir(set, dir->header.parent); if (IS_ERR(parent)) return parent; procname = dir->header.ctl_table[0].procname; return find_subdir(parent, procname, strlen(procname)); } static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_table *entry; struct ctl_dir *dir; int ret; ret = 0; spin_lock(&sysctl_lock); root = (*pentry)->data; set = lookup_header_set(root); dir = xlate_dir(set, (*phead)->parent); if (IS_ERR(dir)) ret = PTR_ERR(dir); else { const char *procname = (*pentry)->procname; head = NULL; entry = find_entry(&head, dir, procname, strlen(procname)); ret = -ENOENT; if (entry && use_table(head)) { unuse_table(*phead); *phead = head; *pentry = entry; ret = 0; } } spin_unlock(&sysctl_lock); return ret; } static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("sysctl table check failed: %s/%s %pV\n", path, table->procname, &vaf); va_end(args); return -EINVAL; } static int sysctl_check_table(const char *path, struct ctl_table *table) { int err = 0; for (; table->procname; table++) { if (table->child) err = sysctl_err(path, table, "Not a file"); if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || (table->proc_handler == proc_dointvec_minmax) || (table->proc_handler == proc_dointvec_jiffies) || (table->proc_handler == proc_dointvec_userhz_jiffies) || (table->proc_handler == proc_dointvec_ms_jiffies) || (table->proc_handler == proc_doulongvec_minmax) || (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { if (!table->data) err = sysctl_err(path, table, "No data"); if (!table->maxlen) err = sysctl_err(path, table, "No maxlen"); } if (!table->proc_handler) err = sysctl_err(path, table, "No proc_handler"); if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode) err = sysctl_err(path, table, "bogus .mode 0%o", table->mode); } return err; } static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table, struct ctl_table_root *link_root) { struct ctl_table *link_table, *entry, *link; struct ctl_table_header *links; struct ctl_node *node; char *link_name; int nr_entries, name_bytes; name_bytes = 0; nr_entries = 0; for (entry = table; entry->procname; entry++) { nr_entries++; name_bytes += strlen(entry->procname) + 1; } links = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*nr_entries + sizeof(struct ctl_table)*(nr_entries + 1) + name_bytes, GFP_KERNEL); if (!links) return NULL; node = (struct ctl_node *)(links + 1); link_table = (struct ctl_table *)(node + nr_entries); link_name = (char *)&link_table[nr_entries + 1]; for (link = link_table, entry = table; entry->procname; link++, entry++) { int len = strlen(entry->procname) + 1; memcpy(link_name, entry->procname, len); 
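		/*
		 * Each shadow entry becomes a symlink: it points at the name
		 * just copied into this shared allocation and stashes the
		 * originating ctl_table_root in ->data, which is what
		 * sysctl_follow_link() later uses to find the real table.
		 */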
link->procname = link_name; link->mode = S_IFLNK|S_IRWXUGO; link->data = link_root; link_name += len; } init_header(links, dir->header.root, dir->header.set, node, link_table); links->nreg = nr_entries; return links; } static bool get_links(struct ctl_dir *dir, struct ctl_table *table, struct ctl_table_root *link_root) { struct ctl_table_header *head; struct ctl_table *entry, *link; /* Are there links available for every entry in table? */ for (entry = table; entry->procname; entry++) { const char *procname = entry->procname; link = find_entry(&head, dir, procname, strlen(procname)); if (!link) return false; if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) continue; if (S_ISLNK(link->mode) && (link->data == link_root)) continue; return false; } /* The checks passed. Increase the registration count on the links */ for (entry = table; entry->procname; entry++) { const char *procname = entry->procname; link = find_entry(&head, dir, procname, strlen(procname)); head->nreg++; } return true; } static int insert_links(struct ctl_table_header *head) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_dir *core_parent = NULL; struct ctl_table_header *links; int err; if (head->set == root_set) return 0; core_parent = xlate_dir(root_set, head->parent); if (IS_ERR(core_parent)) return 0; if (get_links(core_parent, head->ctl_table, head->root)) return 0; core_parent->header.nreg++; spin_unlock(&sysctl_lock); links = new_links(core_parent, head->ctl_table, head->root); spin_lock(&sysctl_lock); err = -ENOMEM; if (!links) goto out; err = 0; if (get_links(core_parent, head->ctl_table, head->root)) { kfree(links); goto out; } err = insert_header(core_parent, links); if (err) kfree(links); out: drop_sysctl_table(&core_parent->header); return err; } /** * __register_sysctl_table - register a leaf sysctl table * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * The members of the &struct ctl_table structure are used as follows: * * procname - the name of the sysctl file under /proc/sys. Set to %NULL to not * enter a sysctl file * * data - a pointer to data for use by proc_handler * * maxlen - the maximum size in bytes of the data * * mode - the file permissions for the /proc/sys file * * child - must be %NULL. * * proc_handler - the text handler routine (described below) * * extra1, extra2 - extra pointers usable by the proc handler routines * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. * * There must be a proc_handler routine for any terminal nodes. * Several default handlers are available to cover common cases - * * proc_dostring(), proc_dointvec(), proc_dointvec_jiffies(), * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(), * proc_doulongvec_ms_jiffies_minmax(), proc_doulongvec_minmax() * * It is the handler's job to read the input buffer from user memory * and process it. The handler should return 0 on success. * * This routine returns %NULL on a failure to register, and a pointer * to the table header on success. 
*/ struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, const char *path, struct ctl_table *table) { struct ctl_table_root *root = set->dir.header.root; struct ctl_table_header *header; const char *name, *nextname; struct ctl_dir *dir; struct ctl_table *entry; struct ctl_node *node; int nr_entries = 0; for (entry = table; entry->procname; entry++) nr_entries++; header = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*nr_entries, GFP_KERNEL); if (!header) return NULL; node = (struct ctl_node *)(header + 1); init_header(header, root, set, node, table); if (sysctl_check_table(path, table)) goto fail; spin_lock(&sysctl_lock); dir = &set->dir; /* Reference moved down the diretory tree get_subdir */ dir->header.nreg++; spin_unlock(&sysctl_lock); /* Find the directory for the ctl_table */ for (name = path; name; name = nextname) { int namelen; nextname = strchr(name, '/'); if (nextname) { namelen = nextname - name; nextname++; } else { namelen = strlen(name); } if (namelen == 0) continue; dir = get_subdir(dir, name, namelen); if (IS_ERR(dir)) goto fail; } spin_lock(&sysctl_lock); if (insert_header(dir, header)) goto fail_put_dir_locked; drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); return header; fail_put_dir_locked: drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: kfree(header); dump_stack(); return NULL; } /** * register_sysctl - register a sysctl table * @path: The path to the directory the sysctl table is in. * @table: the table structure * * Register a sysctl table. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * See __register_sysctl_table for more details. */ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) { return __register_sysctl_table(&sysctl_table_root.default_set, path, table); } EXPORT_SYMBOL(register_sysctl); static char *append_path(const char *path, char *pos, const char *name) { int namelen; namelen = strlen(name); if (((pos - path) + namelen + 2) >= PATH_MAX) return NULL; memcpy(pos, name, namelen); pos[namelen] = '/'; pos[namelen + 1] = '\0'; pos += namelen + 1; return pos; } static int count_subheaders(struct ctl_table *table) { int has_files = 0; int nr_subheaders = 0; struct ctl_table *entry; /* special case: no directory and empty directory */ if (!table || !table->procname) return 1; for (entry = table; entry->procname; entry++) { if (entry->child) nr_subheaders += count_subheaders(entry->child); else has_files = 1; } return nr_subheaders + has_files; } static int register_leaf_sysctl_tables(const char *path, char *pos, struct ctl_table_header ***subheader, struct ctl_table_set *set, struct ctl_table *table) { struct ctl_table *ctl_table_arg = NULL; struct ctl_table *entry, *files; int nr_files = 0; int nr_dirs = 0; int err = -ENOMEM; for (entry = table; entry->procname; entry++) { if (entry->child) nr_dirs++; else nr_files++; } files = table; /* If there are mixed files and directories we need a new table */ if (nr_dirs && nr_files) { struct ctl_table *new; files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1), GFP_KERNEL); if (!files) goto out; ctl_table_arg = files; for (new = files, entry = table; entry->procname; entry++) { if (entry->child) continue; *new = *entry; new++; } } /* Register everything except a directory full of subdirectories */ if (nr_files || !nr_dirs) { struct ctl_table_header *header; header = __register_sysctl_table(set, path, files); if (!header) { kfree(ctl_table_arg); goto out; } 
/* Remember if we need to free the file table */ header->ctl_table_arg = ctl_table_arg; **subheader = header; (*subheader)++; } /* Recurse into the subdirectories. */ for (entry = table; entry->procname; entry++) { char *child_pos; if (!entry->child) continue; err = -ENAMETOOLONG; child_pos = append_path(path, pos, entry->procname); if (!child_pos) goto out; err = register_leaf_sysctl_tables(path, child_pos, subheader, set, entry->child); pos[0] = '\0'; if (err) goto out; } err = 0; out: /* On failure our caller will unregister all registered subheaders */ return err; } /** * __register_sysctl_paths - register a sysctl table hierarchy * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * See __register_sysctl_table for more details. */ struct ctl_table_header *__register_sysctl_paths( struct ctl_table_set *set, const struct ctl_path *path, struct ctl_table *table) { struct ctl_table *ctl_table_arg = table; int nr_subheaders = count_subheaders(table); struct ctl_table_header *header = NULL, **subheaders, **subheader; const struct ctl_path *component; char *new_path, *pos; pos = new_path = kmalloc(PATH_MAX, GFP_KERNEL); if (!new_path) return NULL; pos[0] = '\0'; for (component = path; component->procname; component++) { pos = append_path(new_path, pos, component->procname); if (!pos) goto out; } while (table->procname && table->child && !table[1].procname) { pos = append_path(new_path, pos, table->procname); if (!pos) goto out; table = table->child; } if (nr_subheaders == 1) { header = __register_sysctl_table(set, new_path, table); if (header) header->ctl_table_arg = ctl_table_arg; } else { header = kzalloc(sizeof(*header) + sizeof(*subheaders)*nr_subheaders, GFP_KERNEL); if (!header) goto out; subheaders = (struct ctl_table_header **) (header + 1); subheader = subheaders; header->ctl_table_arg = ctl_table_arg; if (register_leaf_sysctl_tables(new_path, pos, &subheader, set, table)) goto err_register_leaves; } out: kfree(new_path); return header; err_register_leaves: while (subheader > subheaders) { struct ctl_table_header *subh = *(--subheader); struct ctl_table *table = subh->ctl_table_arg; unregister_sysctl_table(subh); kfree(table); } kfree(header); header = NULL; goto out; } /** * register_sysctl_table_path - register a sysctl table hierarchy * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * See __register_sysctl_paths for more details. */ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, struct ctl_table *table) { return __register_sysctl_paths(&sysctl_table_root.default_set, path, table); } EXPORT_SYMBOL(register_sysctl_paths); /** * register_sysctl_table - register a sysctl table hierarchy * @table: the top-level table structure * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * See register_sysctl_paths for more details. 
*/ struct ctl_table_header *register_sysctl_table(struct ctl_table *table) { static const struct ctl_path null_path[] = { {} }; return register_sysctl_paths(null_path, table); } EXPORT_SYMBOL(register_sysctl_table); static void put_links(struct ctl_table_header *header) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_table_root *root = header->root; struct ctl_dir *parent = header->parent; struct ctl_dir *core_parent; struct ctl_table *entry; if (header->set == root_set) return; core_parent = xlate_dir(root_set, parent); if (IS_ERR(core_parent)) return; for (entry = header->ctl_table; entry->procname; entry++) { struct ctl_table_header *link_head; struct ctl_table *link; const char *name = entry->procname; link = find_entry(&link_head, core_parent, name, strlen(name)); if (link && ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || (S_ISLNK(link->mode) && (link->data == root)))) { drop_sysctl_table(link_head); } else { pr_err("sysctl link missing during unregister: "); sysctl_print_dir(parent); pr_cont("/%s\n", name); } } } static void drop_sysctl_table(struct ctl_table_header *header) { struct ctl_dir *parent = header->parent; if (--header->nreg) return; put_links(header); start_unregistering(header); if (!--header->count) kfree_rcu(header, rcu); if (parent) drop_sysctl_table(&parent->header); } /** * unregister_sysctl_table - unregister a sysctl table hierarchy * @header: the header returned from register_sysctl_table * * Unregisters the sysctl table and all children. proc entries may not * actually be removed until they are no longer used by anyone. */ void unregister_sysctl_table(struct ctl_table_header * header) { int nr_subheaders; might_sleep(); if (header == NULL) return; nr_subheaders = count_subheaders(header->ctl_table_arg); if (unlikely(nr_subheaders > 1)) { struct ctl_table_header **subheaders; int i; subheaders = (struct ctl_table_header **)(header + 1); for (i = nr_subheaders -1; i >= 0; i--) { struct ctl_table_header *subh = subheaders[i]; struct ctl_table *table = subh->ctl_table_arg; unregister_sysctl_table(subh); kfree(table); } kfree(header); return; } spin_lock(&sysctl_lock); drop_sysctl_table(header); spin_unlock(&sysctl_lock); } EXPORT_SYMBOL(unregister_sysctl_table); void setup_sysctl_set(struct ctl_table_set *set, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { memset(set, 0, sizeof(*set)); set->is_seen = is_seen; init_header(&set->dir.header, root, set, NULL, root_table); } void retire_sysctl_set(struct ctl_table_set *set) { WARN_ON(!RB_EMPTY_ROOT(&set->dir.root)); } int __init proc_sys_init(void) { struct proc_dir_entry *proc_sys_root; proc_sys_root = proc_mkdir("sys", NULL); proc_sys_root->proc_iops = &proc_sys_dir_operations; proc_sys_root->proc_fops = &proc_sys_dir_file_operations; proc_sys_root->nlink = 0; return sysctl_init(); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_5397_0
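test_perm() in the record above is why the comment stresses that the superuser is not granted everything automatically: the entry's own mode bits are still consulted, only shifted to the owner or group triplet when the caller is root. The following is a small user-space rendering of that check, offered purely as an illustration; my_test_perm(), the is_root_euid/is_root_egid flags and the MY_MAY_* constants are local stand-ins for the kernel's uid_eq()/in_egroup_p() calls and MAY_* flags (whose values are 4, 2 and 1).

#include <stdbool.h>
#include <stdio.h>

#define MY_MAY_EXEC  0x1
#define MY_MAY_WRITE 0x2
#define MY_MAY_READ  0x4

/*
 * Sketch of the test_perm() logic: pick the owner, group or "other" rwx
 * triplet of the sysctl entry's mode depending on whether the caller is
 * root by euid or egid, then require every requested bit to be present.
 */
static int my_test_perm(int mode, int op, bool is_root_euid, bool is_root_egid)
{
	if (is_root_euid)
		mode >>= 6;		/* owner bits */
	else if (is_root_egid)
		mode >>= 3;		/* group bits */
	if ((op & ~mode & (MY_MAY_READ | MY_MAY_WRITE | MY_MAY_EXEC)) == 0)
		return 0;
	return -1;			/* stands in for -EACCES */
}

int main(void)
{
	/* A 0644 entry: the owner bits let root read it -> 0. */
	printf("%d\n", my_test_perm(0644, MY_MAY_READ, true, false));
	/* A 0444 entry stays unwritable even for root -> -1 (EACCES). */
	printf("%d\n", my_test_perm(0444, MY_MAY_WRITE, true, false));
	return 0;
}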
crossvul-cpp_data_bad_2296_0
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.104 2014/10/17 15:49:00 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int, int); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 256 #define MAX_SHNUM 1024 private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s header sections (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, 
value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x01 #define FLAGS_DID_NOTE 0x02 #define FLAGS_DID_BUILD_ID 0x04 #define FLAGS_DID_CORE_STYLE 0x08 #define FLAGS_IS_CORE 0x10 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; if (xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. */ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; #ifdef ELFCORE int os_style = -1; #endif uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. 
*/ return (offset >= size) ? offset : size; } if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) == (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) goto core; if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && xnh_type == NT_GNU_VERSION && descsz == 2) { file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); if (file_printf(ms, ", for GNU/") == -1) return size; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return size; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return size; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return size; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return size; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return size; break; default: if (file_printf(ms, "<unknown>") == -1) return size; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" : "sha1") == -1) return size; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return size; *flags |= FLAGS_DID_BUILD_ID; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && xnh_type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return size; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? 
"," : "", pax[i]) == -1) return size; } } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: if (descsz == 4) { do_note_netbsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } break; case NT_NETBSD_MARCH: if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) { do_note_freebsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && xnh_type == NT_OPENBSD_VERSION && descsz == 4) { if (file_printf(ms, ", for OpenBSD") == -1) return size; /* Content of note is always 0 */ *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; if (file_printf(ms, ", for DragonFly") == -1) return size; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } core: /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } #ifdef ELFCORE if ((*flags & FLAGS_DID_CORE) != 0) return size; if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return size; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (xnh_type == NT_NETBSD_CORE_PROCINFO) { uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", &nbuf[doff + 0x7c]) == -1) return size; /* * Extract the signal number. It is at * offset 0x08. */ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return size; *flags |= FLAGS_DID_CORE; return size; } break; default: if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. 
*/ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS ; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return size; *flags |= FLAGS_DID_CORE; return size; tryanother: ; } } break; } #endif return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int mach, int strtab) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. 
*/ if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) { file_badread(ms); return -1; } name[sizeof(name) - 1] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? 
", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int sh_num) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *shared_libraries = ""; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_INTERP: shared_libraries = " (uses shared libs)"; break; default: if (xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_NOTE: if ((align = xph_align) & 0x80000000UL) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } if (sh_num) break; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked%s", linking_style, shared_libraries) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } fsize = st.st_size; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
/* * inode.c * * PURPOSE * Inode handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 10/04/98 dgb Added rudimentary directory functions * 10/07/98 Fully working udf_block_map! It works! * 11/25/98 bmap altered to better support extents * 12/06/98 blf partition support in udf_iget, udf_block_map * and udf_read_inode * 12/12/98 rewrote udf_block_map to handle next extents and descs across * block boundaries (which is not actually allowed) * 12/20/98 added support for strategy 4096 * 03/07/99 rewrote udf_block_map (again) * New funcs, inode_bmap, udf_next_aext * 04/19/99 Support for writing device EA's for major/minor # */ #include "udfdecl.h" #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/crc-itu-t.h> #include <linux/mpage.h> #include <linux/aio.h> #include "udf_i.h" #include "udf_sb.h" MODULE_AUTHOR("Ben Fennema"); MODULE_DESCRIPTION("Universal Disk Format Filesystem"); MODULE_LICENSE("GPL"); #define EXTENT_MERGE_SIZE 5 static umode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static sector_t inode_getblk(struct inode *, sector_t, int *, int *); static int8_t udf_insert_aext(struct inode *, struct extent_position, struct kernel_lb_addr, uint32_t); static void udf_split_extents(struct inode *, int *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_prealloc_extents(struct inode *, int, int, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_merge_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); static void udf_update_extents(struct inode *, struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int, struct extent_position *); static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); static void __udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); if (iinfo->cached_extent.lstart != -1) { brelse(iinfo->cached_extent.epos.bh); iinfo->cached_extent.lstart = -1; } } /* Invalidate extent cache */ static void udf_clear_extent_cache(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); __udf_clear_extent_cache(inode); spin_unlock(&iinfo->i_extent_cache_lock); } /* Return contents of extent cache */ static int udf_read_extent_cache(struct inode *inode, loff_t bcount, loff_t *lbcount, struct extent_position *pos) { struct udf_inode_info *iinfo = UDF_I(inode); int ret = 0; spin_lock(&iinfo->i_extent_cache_lock); if ((iinfo->cached_extent.lstart <= bcount) && (iinfo->cached_extent.lstart != -1)) { /* Cache hit */ *lbcount = iinfo->cached_extent.lstart; memcpy(pos, &iinfo->cached_extent.epos, sizeof(struct extent_position)); if (pos->bh) get_bh(pos->bh); ret = 1; } spin_unlock(&iinfo->i_extent_cache_lock); return ret; } /* Add extent to extent cache */ static void udf_update_extent_cache(struct inode *inode, loff_t estart, struct extent_position *pos, int next_epos) { struct udf_inode_info *iinfo = 
UDF_I(inode); spin_lock(&iinfo->i_extent_cache_lock); /* Invalidate previously cached extent */ __udf_clear_extent_cache(inode); if (pos->bh) get_bh(pos->bh); memcpy(&iinfo->cached_extent.epos, pos, sizeof(struct extent_position)); iinfo->cached_extent.lstart = estart; if (next_epos) switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: iinfo->cached_extent.epos.offset -= sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: iinfo->cached_extent.epos.offset -= sizeof(struct long_ad); } spin_unlock(&iinfo->i_extent_cache_lock); } void udf_evict_inode(struct inode *inode) { struct udf_inode_info *iinfo = UDF_I(inode); int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) { want_delete = 1; udf_setsize(inode, 0); udf_update_inode(inode, IS_SYNC(inode)); } truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && inode->i_size != iinfo->i_lenExtents) { udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", inode->i_ino, inode->i_mode, (unsigned long long)inode->i_size, (unsigned long long)iinfo->i_lenExtents); } kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; udf_clear_extent_cache(inode); if (want_delete) { udf_free_inode(inode); } } static void udf_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; struct udf_inode_info *iinfo = UDF_I(inode); loff_t isize = inode->i_size; if (to > isize) { truncate_pagecache(inode, isize); if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } } } static int udf_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, udf_get_block, wbc); } static int udf_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, udf_get_block); } static int udf_readpage(struct file *file, struct page *page) { return mpage_readpage(page, udf_get_block); } static int udf_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, udf_get_block); } static int udf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); if (unlikely(ret)) udf_write_failed(mapping, pos + len); return ret; } static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block); if (unlikely(ret < 0 && (rw & WRITE))) udf_write_failed(mapping, offset + count); return ret; } static sector_t udf_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, udf_get_block); } const struct address_space_operations udf_aops = { .readpage = udf_readpage, .readpages = udf_readpages, .writepage = udf_writepage, .writepages = udf_writepages, .write_begin = udf_write_begin, .write_end = generic_write_end, .direct_IO = udf_direct_IO, .bmap = udf_bmap, }; /* * Expand file stored in ICB to a normal 
one-block-file * * This function requires i_data_sem for writing and releases it. * This function requires i_mutex held */ int udf_expand_file_adinicb(struct inode *inode) { struct page *page; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; struct writeback_control udf_wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, }; WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); if (!iinfo->i_lenAlloc) { if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } /* * Release i_data_sem so that we can lock a page - page lock ranks * above i_data_sem. i_mutex still protects us against file changes. */ up_write(&iinfo->i_data_sem); page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) return -ENOMEM; if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_CACHE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); } down_write(&iinfo->i_data_sem); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; else iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; up_write(&iinfo->i_data_sem); err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... */ lock_page(page); kaddr = kmap(page); down_write(&iinfo->i_data_sem); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap(page); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } page_cache_release(page); mark_inode_dirty(inode); return err; } struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, int *err) { int newblock; struct buffer_head *dbh = NULL; struct kernel_lb_addr eloc; uint8_t alloctype; struct extent_position epos; struct udf_fileident_bh sfibh, dfibh; loff_t f_pos = udf_ext0_offset(inode); int size = udf_ext0_offset(inode) + inode->i_size; struct fileIdentDesc cfi, *sfi, *dfi; struct udf_inode_info *iinfo = UDF_I(inode); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) alloctype = ICBTAG_FLAG_AD_SHORT; else alloctype = ICBTAG_FLAG_AD_LONG; if (!inode->i_size) { iinfo->i_alloc_type = alloctype; mark_inode_dirty(inode); return NULL; } /* alloc block, and copy data to it */ *block = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, err); if (!(*block)) return NULL; newblock = udf_get_pblock(inode->i_sb, *block, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) return NULL; dbh = udf_tgetblk(inode->i_sb, newblock); if (!dbh) return NULL; lock_buffer(dbh); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(dbh); unlock_buffer(dbh); mark_buffer_dirty_inode(dbh, inode); sfibh.soffset = sfibh.eoffset = f_pos & (inode->i_sb->s_blocksize - 1); sfibh.sbh = sfibh.ebh = NULL; dfibh.soffset = dfibh.eoffset = 0; dfibh.sbh = dfibh.ebh = dbh; while (f_pos < size) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; sfi = 
udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); if (!sfi) { brelse(dbh); return NULL; } iinfo->i_alloc_type = alloctype; sfi->descTag.tagLocation = cpu_to_le32(*block); dfibh.soffset = dfibh.eoffset; dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; brelse(dbh); return NULL; } } mark_buffer_dirty_inode(dbh, inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; iinfo->i_lenExtents = inode->i_size; epos.bh = NULL; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); udf_add_aext(inode, &epos, &eloc, inode->i_size, 0); /* UniqueID stuff */ brelse(epos.bh); mark_inode_dirty(inode); return dbh; } static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { int err, new; sector_t phys = 0; struct udf_inode_info *iinfo; if (!create) { phys = udf_block_map(inode, block); if (phys) map_bh(bh_result, inode->i_sb, phys); return 0; } err = -EIO; new = 0; iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); if (block == iinfo->i_next_alloc_block + 1) { iinfo->i_next_alloc_block++; iinfo->i_next_alloc_goal++; } udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); if (!phys) goto abort; if (new) set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, phys); abort: up_write(&iinfo->i_data_sem); return err; } static struct buffer_head *udf_getblk(struct inode *inode, long block, int create, int *err) { struct buffer_head *bh; struct buffer_head dummy; dummy.b_state = 0; dummy.b_blocknr = -1000; *err = udf_get_block(inode, block, &dummy, create); if (!*err && buffer_mapped(&dummy)) { bh = sb_getblk(inode->i_sb, dummy.b_blocknr); if (buffer_new(&dummy)) { lock_buffer(bh); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); } return bh; } return NULL; } /* Extend the file by 'blocks' blocks, return the number of extents added */ static int udf_do_extend_file(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, sector_t blocks) { sector_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; struct kernel_lb_addr prealloc_loc = {}; int prealloc_len = 0; struct udf_inode_info *iinfo; int err; /* The previous extent is fake and we should not extend by anything * - there's nothing to do... */ if (!blocks && fake) return 0; iinfo = UDF_I(inode); /* Round the last extent up to a multiple of block size */ if (last_ext->extLength & (sb->s_blocksize - 1)) { last_ext->extLength = (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); } /* Last extent are just preallocated blocks? 
*/ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { /* Save the extent so that we can reattach it to the end */ prealloc_loc = last_ext->extLocation; prealloc_len = last_ext->extLength; /* Mark the extent as a hole */ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; } /* Can we merge with the previous extent? */ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; if (add > blocks) add = blocks; blocks -= add; last_ext->extLength += add << sb->s_blocksize_bits; } if (fake) { udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); count++; } else udf_write_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); /* Managed to do everything necessary? */ if (!blocks) goto out; /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ last_ext->extLocation.logicalBlockNum = 0; last_ext->extLocation.partitionReferenceNum = 0; add = (1 << (30-sb->s_blocksize_bits)) - 1; last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); /* Create enough extents to cover the whole hole */ while (blocks > add) { blocks -= add; err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } if (blocks) { last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (blocks << sb->s_blocksize_bits); err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) return err; count++; } out: /* Do we have some preallocated blocks saved? */ if (prealloc_len) { err = udf_add_aext(inode, last_pos, &prealloc_loc, prealloc_len, 1); if (err) return err; last_ext->extLocation = prealloc_loc; last_ext->extLength = prealloc_len; count++; } /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) last_pos->offset -= sizeof(struct long_ad); else return -EIO; return count; } static int udf_extend_file(struct inode *inode, loff_t newsize) { struct extent_position epos; struct kernel_lb_addr eloc; uint32_t elen; int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else BUG(); etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); /* File has extent covering the new size (could happen when extending * inside a block)? */ if (etype != -1) return 0; if (newsize & (sb->s_blocksize - 1)) offset++; /* Extended file just to the boundary of the last file block? */ if (offset == 0) return 0; /* Truncate is extending the file by 'offset' blocks */ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { /* File has no extents at all or has empty last * indirect extent! Create a fake extent... 
*/ extent.extLocation.logicalBlockNum = 0; extent.extLocation.partitionReferenceNum = 0; extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; } else { epos.offset -= adsize; etype = udf_next_aext(inode, &epos, &extent.extLocation, &extent.extLength, 0); extent.extLength |= etype << 30; } err = udf_do_extend_file(inode, &epos, &extent, offset); if (err < 0) goto out; err = 0; iinfo->i_lenExtents = newsize; out: brelse(epos.bh); return err; } static sector_t inode_getblk(struct inode *inode, sector_t block, int *err, int *new) { struct kernel_long_ad laarr[EXTENT_MERGE_SIZE]; struct extent_position prev_epos, cur_epos, next_epos; int count = 0, startnum = 0, endnum = 0; uint32_t elen = 0, tmpelen; struct kernel_lb_addr eloc, tmpeloc; int c = 1; loff_t lbcount = 0, b_off = 0; uint32_t newblocknum, newblock; sector_t offset = 0; int8_t etype; struct udf_inode_info *iinfo = UDF_I(inode); int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; int lastblock = 0; bool isBeyondEOF; *err = 0; *new = 0; prev_epos.offset = udf_file_entry_alloc_offset(inode); prev_epos.block = iinfo->i_location; prev_epos.bh = NULL; cur_epos = next_epos = prev_epos; b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; /* find the extent which contains the block we are looking for. alternate between laarr[0] and laarr[1] for locations of the current extent, and the previous extent */ do { if (prev_epos.bh != cur_epos.bh) { brelse(prev_epos.bh); get_bh(cur_epos.bh); prev_epos.bh = cur_epos.bh; } if (cur_epos.bh != next_epos.bh) { brelse(cur_epos.bh); get_bh(next_epos.bh); cur_epos.bh = next_epos.bh; } lbcount += elen; prev_epos.block = cur_epos.block; cur_epos.block = next_epos.block; prev_epos.offset = cur_epos.offset; cur_epos.offset = next_epos.offset; etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1); if (etype == -1) break; c = !c; laarr[c].extLength = (etype << 30) | elen; laarr[c].extLocation = eloc; if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) pgoal = eloc.logicalBlockNum + ((elen + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); count++; } while (lbcount + elen <= b_off); b_off -= lbcount; offset = b_off >> inode->i_sb->s_blocksize_bits; /* * Move prev_epos and cur_epos into indirect extent if we are at * the pointer to it */ udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0); udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0); /* if the extent is allocated and recorded, return the block if the extent is not a multiple of the blocksize, round up */ if (etype == (EXT_RECORDED_ALLOCATED >> 30)) { if (elen & (inode->i_sb->s_blocksize - 1)) { elen = EXT_RECORDED_ALLOCATED | ((elen + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); udf_write_aext(inode, &cur_epos, &eloc, elen, 1); } brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset); return newblock; } /* Are we beyond EOF? */ if (etype == -1) { int ret; isBeyondEOF = 1; if (count) { if (c) laarr[0] = laarr[1]; startnum = 1; } else { /* Create a fake extent when there's not one */ memset(&laarr[0].extLocation, 0x00, sizeof(struct kernel_lb_addr)); laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; /* Will udf_do_extend_file() create real extent from a fake one? 
*/ startnum = (offset > 0); } /* Create extents for the hole between EOF and offset */ ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); if (ret < 0) { brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); *err = ret; return 0; } c = 0; offset = 0; count += ret; /* We are not covered by a preallocated extent? */ if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) { /* Is there any real extent? - otherwise we overwrite * the fake one... */ if (count) c = !c; laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | inode->i_sb->s_blocksize; memset(&laarr[c].extLocation, 0x00, sizeof(struct kernel_lb_addr)); count++; } endnum = c + 1; lastblock = 1; } else { isBeyondEOF = 0; endnum = startnum = ((count > 2) ? 2 : count); /* if the current extent is in position 0, swap it with the previous */ if (!c && count != 1) { laarr[2] = laarr[0]; laarr[0] = laarr[1]; laarr[1] = laarr[2]; c = 1; } /* if the current block is located in an extent, read the next extent */ etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0); if (etype != -1) { laarr[c + 1].extLength = (etype << 30) | elen; laarr[c + 1].extLocation = eloc; count++; startnum++; endnum++; } else lastblock = 1; } /* if the current extent is not recorded but allocated, get the * block in the extent corresponding to the requested block */ if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) newblocknum = laarr[c].extLocation.logicalBlockNum + offset; else { /* otherwise, allocate a new block */ if (iinfo->i_next_alloc_block == block) goal = iinfo->i_next_alloc_goal; if (!goal) { if (!(goal = pgoal)) /* XXX: what was intended here? */ goal = iinfo->i_location.logicalBlockNum + 1; } newblocknum = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, goal, err); if (!newblocknum) { brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); *err = -ENOSPC; return 0; } if (isBeyondEOF) iinfo->i_lenExtents += inode->i_sb->s_blocksize; } /* if the extent the requsted block is located in contains multiple * blocks, split the extent into at most three extents. blocks prior * to requested block, requested block, and blocks after requested * block */ udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); #ifdef UDF_PREALLOCATE /* We preallocate blocks only for regular files. It also makes sense * for directories but there's a problem when to drop the * preallocation. We might use some delayed work for that but I feel * it's overengineering for a filesystem like UDF. 
*/ if (S_ISREG(inode->i_mode)) udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); #endif /* merge any continuous blocks in laarr */ udf_merge_extents(inode, laarr, &endnum); /* write back the new extents, inserting new extents if the new number * of extents is greater than the old number, and deleting extents if * the new number of extents is less than the old number */ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); brelse(prev_epos.bh); brelse(cur_epos.bh); brelse(next_epos.bh); newblock = udf_get_pblock(inode->i_sb, newblocknum, iinfo->i_location.partitionReferenceNum, 0); if (!newblock) { *err = -EIO; return 0; } *new = 1; iinfo->i_next_alloc_block = block; iinfo->i_next_alloc_goal = newblocknum; inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return newblock; } static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { int curr = *c; int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits; int8_t etype = (laarr[curr].extLength >> 30); if (blen == 1) ; else if (!offset || blen == offset + 1) { laarr[curr + 2] = laarr[curr + 1]; laarr[curr + 1] = laarr[curr]; } else { laarr[curr + 3] = laarr[curr + 1]; laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; } if (offset) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &laarr[curr].extLocation, 0, offset); laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (offset << blocksize_bits); laarr[curr].extLocation.logicalBlockNum = 0; laarr[curr].extLocation. 
partitionReferenceNum = 0; } else laarr[curr].extLength = (etype << 30) | (offset << blocksize_bits); curr++; (*c)++; (*endnum)++; } laarr[curr].extLocation.logicalBlockNum = newblocknum; if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) laarr[curr].extLocation.partitionReferenceNum = UDF_I(inode)->i_location.partitionReferenceNum; laarr[curr].extLength = EXT_RECORDED_ALLOCATED | blocksize; curr++; if (blen != offset + 1) { if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) laarr[curr].extLocation.logicalBlockNum += offset + 1; laarr[curr].extLength = (etype << 30) | ((blen - (offset + 1)) << blocksize_bits); curr++; (*endnum)++; } } } static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int start, length = 0, currlength = 0, i; if (*endnum >= (c + 1)) { if (!lastblock) return; else start = c; } else { if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { start = c + 1; length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else start = c; } for (i = start + 1; i <= *endnum; i++) { if (i == *endnum) { if (lastblock) length += UDF_DEFAULT_PREALLOC_BLOCKS; } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); } else break; } if (length) { int next = laarr[start].extLocation.logicalBlockNum + (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); int numalloc = udf_prealloc_blocks(inode->i_sb, inode, laarr[start].extLocation.partitionReferenceNum, next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length : UDF_DEFAULT_PREALLOC_BLOCKS) - currlength); if (numalloc) { if (start == (c + 1)) laarr[start].extLength += (numalloc << inode->i_sb->s_blocksize_bits); else { memmove(&laarr[c + 2], &laarr[c + 1], sizeof(struct long_ad) * (*endnum - (c + 1))); (*endnum)++; laarr[c + 1].extLocation.logicalBlockNum = next; laarr[c + 1].extLocation.partitionReferenceNum = laarr[c].extLocation. 
partitionReferenceNum; laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED | (numalloc << inode->i_sb->s_blocksize_bits); start = c + 1; } for (i = start + 1; numalloc && i < *endnum; i++) { int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (elen > numalloc) { laarr[i].extLength -= (numalloc << inode->i_sb->s_blocksize_bits); numalloc = 0; } else { numalloc -= elen; if (*endnum > (i + 1)) memmove(&laarr[i], &laarr[i + 1], sizeof(struct long_ad) * (*endnum - (i + 1))); i--; (*endnum)--; } } UDF_I(inode)->i_lenExtents += numalloc << inode->i_sb->s_blocksize_bits; } } } static void udf_merge_extents(struct inode *inode, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum) { int i; unsigned long blocksize = inode->i_sb->s_blocksize; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; for (i = 0; i < (*endnum - 1); i++) { struct kernel_long_ad *li /*l[i]*/ = &laarr[i]; struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1]; if (((li->extLength >> 30) == (lip1->extLength >> 30)) && (((li->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || ((lip1->extLocation.logicalBlockNum - li->extLocation.logicalBlockNum) == (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits)))) { if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; lip1->extLocation.logicalBlockNum = li->extLocation.logicalBlockNum + ((li->extLength & UDF_EXTENT_LENGTH_MASK) >> blocksize_bits); } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); i--; (*endnum)--; } } else if (((li->extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) && ((lip1->extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0, ((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits); li->extLocation.logicalBlockNum = 0; li->extLocation.partitionReferenceNum = 0; if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { lip1->extLength = (lip1->extLength - (li->extLength & UDF_EXTENT_LENGTH_MASK) + UDF_EXTENT_LENGTH_MASK) & ~(blocksize - 1); li->extLength = (li->extLength & UDF_EXTENT_FLAG_MASK) + (UDF_EXTENT_LENGTH_MASK + 1) - blocksize; } else { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) & ~(blocksize - 1)); if (*endnum > (i + 2)) memmove(&laarr[i + 1], &laarr[i + 2], sizeof(struct long_ad) * (*endnum - (i + 2))); i--; (*endnum)--; } } else if ((li->extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0, ((li->extLength & UDF_EXTENT_LENGTH_MASK) + blocksize - 1) >> blocksize_bits); li->extLocation.logicalBlockNum = 0; li->extLocation.partitionReferenceNum = 0; li->extLength = (li->extLength & UDF_EXTENT_LENGTH_MASK) | EXT_NOT_RECORDED_NOT_ALLOCATED; } } } static void udf_update_extents(struct inode *inode, struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum, 
struct extent_position *epos) { int start = 0, i; struct kernel_lb_addr tmploc; uint32_t tmplen; if (startnum > endnum) { for (i = 0; i < (startnum - endnum); i++) udf_delete_aext(inode, *epos, laarr[i].extLocation, laarr[i].extLength); } else if (startnum < endnum) { for (i = 0; i < (endnum - startnum); i++) { udf_insert_aext(inode, *epos, laarr[i].extLocation, laarr[i].extLength); udf_next_aext(inode, epos, &laarr[i].extLocation, &laarr[i].extLength, 1); start++; } } for (i = start; i < endnum; i++) { udf_next_aext(inode, epos, &tmploc, &tmplen, 0); udf_write_aext(inode, epos, &laarr[i].extLocation, laarr[i].extLength, 1); } } struct buffer_head *udf_bread(struct inode *inode, int block, int create, int *err) { struct buffer_head *bh = NULL; bh = udf_getblk(inode, block, create, err); if (!bh) return NULL; if (buffer_uptodate(bh)) return bh; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; brelse(bh); *err = -EIO; return NULL; } int udf_setsize(struct inode *inode, loff_t newsize) { int err; struct udf_inode_info *iinfo; int bsize = 1 << inode->i_blkbits; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return -EINVAL; if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; iinfo = UDF_I(inode); if (newsize > inode->i_size) { down_write(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { if (bsize < (udf_file_entry_alloc_offset(inode) + newsize)) { err = udf_expand_file_adinicb(inode); if (err) return err; down_write(&iinfo->i_data_sem); } else { iinfo->i_lenAlloc = newsize; goto set_size; } } err = udf_extend_file(inode, newsize); if (err) { up_write(&iinfo->i_data_sem); return err; } set_size: truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); } else { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize, 0x00, bsize - newsize - udf_file_entry_alloc_offset(inode)); iinfo->i_lenAlloc = newsize; truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); goto update_time; } err = block_truncate_page(inode->i_mapping, newsize, udf_get_block); if (err) return err; down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); truncate_setsize(inode, newsize); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } update_time: inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); if (IS_SYNC(inode)) udf_sync_inode(inode); else mark_inode_dirty(inode); return 0; } static void __udf_read_inode(struct inode *inode) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint16_t ident; struct udf_inode_info *iinfo = UDF_I(inode); struct udf_sb_info *sbi = UDF_SB(inode->i_sb); unsigned int link_count; /* * Set defaults, but the inode is still incomplete! 
* Note: get_new_inode() sets the following on a new inode: * i_sb = sb * i_no = ino * i_flags = sb->s_flags * i_state = 0 * clean_inode(): zero fills and sets * i_count = 1 * i_nlink = 1 * i_op = NULL; */ bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident); if (!bh) { udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); make_bad_inode(inode); return; } if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && ident != TAG_IDENT_USE) { udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", inode->i_ino, ident); brelse(bh); make_bad_inode(inode); return; } fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4096)) { struct buffer_head *ibh; ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, &ident); if (ident == TAG_IDENT_IE && ibh) { struct buffer_head *nbh = NULL; struct kernel_lb_addr loc; struct indirectEntry *ie; ie = (struct indirectEntry *)ibh->b_data; loc = lelb_to_cpu(ie->indirectICB.extLocation); if (ie->indirectICB.extLength && (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, &ident))) { if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) { memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr)); brelse(bh); brelse(ibh); brelse(nbh); __udf_read_inode(inode); return; } brelse(nbh); } } brelse(ibh); } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { udf_err(inode->i_sb, "unsupported strategy type: %d\n", le16_to_cpu(fe->icbTag.strategyType)); brelse(bh); make_bad_inode(inode); return; } if (fe->icbTag.strategyType == cpu_to_le16(4)) iinfo->i_strat4096 = 0; else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ iinfo->i_strat4096 = 1; iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; iinfo->i_lenAlloc = 0; iinfo->i_next_alloc_block = 0; iinfo->i_next_alloc_goal = 0; if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { iinfo->i_efe = 1; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { iinfo->i_efe = 0; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { iinfo->i_efe = 0; iinfo->i_use = 1; iinfo->i_lenAlloc = le32_to_cpu( ((struct unallocSpaceEntry *)bh->b_data)-> lengthAllocDescs); if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); return; } read_lock(&sbi->s_cred_lock); i_uid_write(inode, le32_to_cpu(fe->uid)); if (!uid_valid(inode->i_uid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) inode->i_uid = UDF_SB(inode->i_sb)->s_uid; i_gid_write(inode, le32_to_cpu(fe->gid)); if (!gid_valid(inode->i_gid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; if 
(fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_dmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_dmode; else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; read_unlock(&sbi->s_cred_lock); link_count = le16_to_cpu(fe->fileLinkCount); if (!link_count) link_count = 1; set_nlink(inode, link_count); inode->i_size = le64_to_cpu(fe->informationLength); iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(fe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint); } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime)) iinfo->i_crtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(efe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); } switch (fe->icbTag.fileType) { case ICBTAG_FILE_TYPE_DIRECTORY: inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; inode->i_mode |= S_IFDIR; inc_nlink(inode); break; case ICBTAG_FILE_TYPE_REALTIME: case ICBTAG_FILE_TYPE_REGULAR: case ICBTAG_FILE_TYPE_UNDEF: case ICBTAG_FILE_TYPE_VAT20: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; inode->i_mode |= S_IFREG; break; case ICBTAG_FILE_TYPE_BLOCK: inode->i_mode |= S_IFBLK; break; case ICBTAG_FILE_TYPE_CHAR: inode->i_mode |= S_IFCHR; break; case ICBTAG_FILE_TYPE_FIFO: init_special_inode(inode, inode->i_mode | S_IFIFO, 0); break; case ICBTAG_FILE_TYPE_SOCKET: init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: udf_debug("METADATA FILE-----\n"); break; case ICBTAG_FILE_TYPE_MIRROR: udf_debug("METADATA MIRROR FILE-----\n"); break; case ICBTAG_FILE_TYPE_BITMAP: udf_debug("METADATA BITMAP FILE-----\n"); break; default: udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n", inode->i_ino, fe->icbTag.fileType); make_bad_inode(inode); return; } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (dsea) { init_special_inode(inode, inode->i_mode, MKDEV(le32_to_cpu(dsea->majorDeviceIdent), 
le32_to_cpu(dsea->minorDeviceIdent))); /* Developer ID ??? */ } else make_bad_inode(inode); } brelse(bh); } static int udf_alloc_i_data(struct inode *inode, size_t size) { struct udf_inode_info *iinfo = UDF_I(inode); iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL); if (!iinfo->i_ext.i_data) { udf_err(inode->i_sb, "(ino %ld) no free memory\n", inode->i_ino); return -ENOMEM; } return 0; } static umode_t udf_convert_permissions(struct fileEntry *fe) { umode_t mode; uint32_t permissions; uint32_t flags; permissions = le32_to_cpu(fe->permissions); flags = le16_to_cpu(fe->icbTag.flags); mode = ((permissions) & S_IRWXO) | ((permissions >> 2) & S_IRWXG) | ((permissions >> 4) & S_IRWXU) | ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0); return mode; } int udf_write_inode(struct inode *inode, struct writeback_control *wbc) { return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } static int udf_sync_inode(struct inode *inode) { return udf_update_inode(inode, 1); } static int udf_update_inode(struct inode *inode, int do_sync) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint64_t lb_recorded; uint32_t udfperms; uint16_t icbflags; uint16_t crclen; int err = 0; struct udf_sb_info *sbi = UDF_SB(inode->i_sb); unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; struct udf_inode_info *iinfo = UDF_I(inode); bh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0)); if (!bh) { udf_debug("getblk failure\n"); return -ENOMEM; } lock_buffer(bh); memset(bh->b_data, 0, inode->i_sb->s_blocksize); fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (iinfo->i_use) { struct unallocSpaceEntry *use = (struct unallocSpaceEntry *)bh->b_data; use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); use->descTag.tagLocation = cpu_to_le32(iinfo->i_location.logicalBlockNum); crclen = sizeof(struct unallocSpaceEntry) + iinfo->i_lenAlloc - sizeof(struct tag); use->descTag.descCRCLength = cpu_to_le16(crclen); use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + sizeof(struct tag), crclen)); use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); goto out; } if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) fe->uid = cpu_to_le32(-1); else fe->uid = cpu_to_le32(i_uid_read(inode)); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET)) fe->gid = cpu_to_le32(-1); else fe->gid = cpu_to_le32(i_gid_read(inode)); udfperms = ((inode->i_mode & S_IRWXO)) | ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4); udfperms |= (le32_to_cpu(fe->permissions) & (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | FE_PERM_G_DELETE | FE_PERM_G_CHATTR | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); fe->permissions = cpu_to_le32(udfperms); if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); else fe->fileLinkCount = cpu_to_le16(inode->i_nlink); fe->informationLength = cpu_to_le64(inode->i_size); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct regid *eid; struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (!dsea) { dsea = (struct deviceSpec *) udf_add_extendedattr(inode, sizeof(struct deviceSpec) + sizeof(struct regid), 12, 0x3); dsea->attrType = 
cpu_to_le32(12); dsea->attrSubtype = 1; dsea->attrLength = cpu_to_le32( sizeof(struct deviceSpec) + sizeof(struct regid)); dsea->impUseLength = cpu_to_le32(sizeof(struct regid)); } eid = (struct regid *)dsea->impUse; memset(eid, 0, sizeof(struct regid)); strcpy(eid->ident, UDF_ID_DEVELOPER); eid->identSuffix[0] = UDF_OS_CLASS_UNIX; eid->identSuffix[1] = UDF_OS_ID_LINUX; dsea->majorDeviceIdent = cpu_to_le32(imajor(inode)); dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) lb_recorded = 0; /* No extents => no blocks! */ else lb_recorded = (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> (blocksize_bits - 9); if (iinfo->i_efe == 0) { memcpy(bh->b_data + sizeof(struct fileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct fileEntry)); fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime); memset(&(fe->impIdent), 0, sizeof(struct regid)); strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; fe->uniqueID = cpu_to_le64(iinfo->i_unique); fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint); fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); crclen = sizeof(struct fileEntry); } else { memcpy(bh->b_data + sizeof(struct extendedFileEntry), iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); efe->objectSize = cpu_to_le64(inode->i_size); efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec)) iinfo->i_crtime = inode->i_atime; if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec)) iinfo->i_crtime = inode->i_mtime; if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec || (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec && iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) iinfo->i_crtime = inode->i_ctime; udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime); udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime); udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime); udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime); memset(&(efe->impIdent), 0, sizeof(struct regid)); strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; efe->uniqueID = cpu_to_le64(iinfo->i_unique); efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint); efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); crclen = sizeof(struct extendedFileEntry); } if (iinfo->i_strat4096) { fe->icbTag.strategyType = cpu_to_le16(4096); fe->icbTag.strategyParameter = cpu_to_le16(1); fe->icbTag.numEntries = cpu_to_le16(2); } else { fe->icbTag.strategyType = cpu_to_le16(4); fe->icbTag.numEntries = cpu_to_le16(1); } if (S_ISDIR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; else if (S_ISREG(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; else if (S_ISLNK(inode->i_mode)) 
fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK; else if (S_ISBLK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK; else if (S_ISCHR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR; else if (S_ISFIFO(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO; else if (S_ISSOCK(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; icbflags = iinfo->i_alloc_type | ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) | (le16_to_cpu(fe->icbTag.flags) & ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID | ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); fe->icbTag.flags = cpu_to_le16(icbflags); if (sbi->s_udfrev >= 0x0200) fe->descTag.descVersion = cpu_to_le16(3); else fe->descTag.descVersion = cpu_to_le16(2); fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); fe->descTag.tagLocation = cpu_to_le32( iinfo->i_location.logicalBlockNum); crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag); fe->descTag.descCRCLength = cpu_to_le16(crclen); fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag), crclen)); fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); out: set_buffer_uptodate(bh); unlock_buffer(bh); /* write the data blocks */ mark_buffer_dirty(bh); if (do_sync) { sync_dirty_buffer(bh); if (buffer_write_io_error(bh)) { udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n", inode->i_ino); err = -EIO; } } brelse(bh); return err; } struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) { unsigned long block = udf_get_lb_pblock(sb, ino, 0); struct inode *inode = iget_locked(sb, block); if (!inode) return NULL; if (inode->i_state & I_NEW) { memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); __udf_read_inode(inode); unlock_new_inode(inode); } if (is_bad_inode(inode)) goto out_iput; if (ino->logicalBlockNum >= UDF_SB(sb)-> s_partmaps[ino->partitionReferenceNum].s_partition_len) { udf_debug("block=%d, partition=%d out of range\n", ino->logicalBlockNum, ino->partitionReferenceNum); make_bad_inode(inode); goto out_iput; } return inode; out_iput: iput(inode); return NULL; } int udf_add_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; struct short_ad *sad = NULL; struct long_ad *lad = NULL; struct allocExtDesc *aed; uint8_t *ptr; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else return -EIO; if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) { unsigned char *sptr, *dptr; struct buffer_head *nbh; int err, loffset; struct kernel_lb_addr obloc = epos->block; epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, obloc.partitionReferenceNum, obloc.logicalBlockNum, &err); if (!epos->block.logicalBlockNum) return -ENOSPC; nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, &epos->block, 0)); if (!nbh) return -EIO; lock_buffer(nbh); memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(nbh); unlock_buffer(nbh); mark_buffer_dirty_inode(nbh, inode); aed = (struct allocExtDesc *)(nbh->b_data); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) 
aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum); if (epos->offset + adsize > inode->i_sb->s_blocksize) { loffset = epos->offset; aed->lengthAllocDescs = cpu_to_le32(adsize); sptr = ptr - adsize; dptr = nbh->b_data + sizeof(struct allocExtDesc); memcpy(dptr, sptr, adsize); epos->offset = sizeof(struct allocExtDesc) + adsize; } else { loffset = epos->offset + adsize; aed->lengthAllocDescs = cpu_to_le32(0); sptr = ptr; epos->offset = sizeof(struct allocExtDesc); if (epos->bh) { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); } else { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } } if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200) udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1, epos->block.logicalBlockNum, sizeof(struct tag)); else udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, epos->block.logicalBlockNum, sizeof(struct tag)); switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)sptr; sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)sptr; lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | inode->i_sb->s_blocksize); lad->extLocation = cpu_to_lelb(epos->block); memset(lad->impUse, 0x00, sizeof(lad->impUse)); break; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, loffset); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); brelse(epos->bh); } else { mark_inode_dirty(inode); } epos->bh = nbh; } udf_write_aext(inode, epos, eloc, elen, inc); if (!epos->bh) { iinfo->i_lenAlloc += adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)epos->bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 
0 : adsize)); else udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(epos->bh, inode); } return 0; } void udf_write_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc) { int adsize; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else ptr = epos->bh->b_data + epos->offset; switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = (struct short_ad *)ptr; sad->extLength = cpu_to_le32(elen); sad->extPosition = cpu_to_le32(eloc->logicalBlockNum); adsize = sizeof(struct short_ad); break; case ICBTAG_FLAG_AD_LONG: lad = (struct long_ad *)ptr; lad->extLength = cpu_to_le32(elen); lad->extLocation = cpu_to_lelb(*eloc); memset(lad->impUse, 0x00, sizeof(lad->impUse)); adsize = sizeof(struct long_ad); break; default: return; } if (epos->bh) { if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) { struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data; udf_update_tag(epos->bh->b_data, le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc)); } mark_buffer_dirty_inode(epos->bh, inode); } else { mark_inode_dirty(inode); } if (inc) epos->offset += adsize; } int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int8_t etype; while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { int block; epos->block = *eloc; epos->offset = sizeof(struct allocExtDesc); brelse(epos->bh); block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0); epos->bh = udf_tread(inode->i_sb, block); if (!epos->bh) { udf_debug("reading block %d failed!\n", block); return -1; } } return etype; } int8_t udf_current_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int inc) { int alen; int8_t etype; uint8_t *ptr; struct short_ad *sad; struct long_ad *lad; struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) { if (!epos->offset) epos->offset = udf_file_entry_alloc_offset(inode); ptr = iinfo->i_ext.i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; alen = udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc; } else { if (!epos->offset) epos->offset = sizeof(struct allocExtDesc); ptr = epos->bh->b_data + epos->offset; alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)-> lengthAllocDescs); } switch (iinfo->i_alloc_type) { case ICBTAG_FLAG_AD_SHORT: sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc); if (!sad) return -1; etype = le32_to_cpu(sad->extLength) >> 30; eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); eloc->partitionReferenceNum = iinfo->i_location.partitionReferenceNum; *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; break; case ICBTAG_FLAG_AD_LONG: lad = udf_get_filelongad(ptr, alen, &epos->offset, inc); if (!lad) return -1; etype = le32_to_cpu(lad->extLength) >> 30; *eloc = lelb_to_cpu(lad->extLocation); *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; break; default: udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type); return -1; } return etype; } static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr neloc, uint32_t nelen) { struct kernel_lb_addr oeloc; uint32_t 
oelen; int8_t etype; if (epos.bh) get_bh(epos.bh); while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) { udf_write_aext(inode, &epos, &neloc, nelen, 1); neloc = oeloc; nelen = (etype << 30) | oelen; } udf_add_aext(inode, &epos, &neloc, nelen, 1); brelse(epos.bh); return (nelen >> 30); } int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, struct kernel_lb_addr eloc, uint32_t elen) { struct extent_position oepos; int adsize; int8_t etype; struct allocExtDesc *aed; struct udf_inode_info *iinfo; if (epos.bh) { get_bh(epos.bh); get_bh(epos.bh); } iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) adsize = sizeof(struct long_ad); else adsize = 0; oepos = epos; if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1) return -1; while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1); if (oepos.bh != epos.bh) { oepos.block = epos.block; brelse(oepos.bh); get_bh(epos.bh); oepos.bh = epos.bh; oepos.offset = epos.offset - adsize; } } memset(&eloc, 0x00, sizeof(struct kernel_lb_addr)); elen = 0; if (epos.bh != oepos.bh) { udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= (adsize * 2); mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize)); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } else { udf_write_aext(inode, &oepos, &eloc, elen, 1); if (!oepos.bh) { iinfo->i_lenAlloc -= adsize; mark_inode_dirty(inode); } else { aed = (struct allocExtDesc *)oepos.bh->b_data; le32_add_cpu(&aed->lengthAllocDescs, -adsize); if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) udf_update_tag(oepos.bh->b_data, epos.offset - adsize); else udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); mark_buffer_dirty_inode(oepos.bh, inode); } } brelse(epos.bh); brelse(oepos.bh); return (elen >> 30); } int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos, struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset) { unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits; int8_t etype; struct udf_inode_info *iinfo; iinfo = UDF_I(inode); if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) { pos->offset = 0; pos->block = iinfo->i_location; pos->bh = NULL; } *elen = 0; do { etype = udf_next_aext(inode, pos, eloc, elen, 1); if (etype == -1) { *offset = (bcount - lbcount) >> blocksize_bits; iinfo->i_lenExtents = lbcount; return -1; } lbcount += *elen; } while (lbcount <= bcount); /* update extent cache */ udf_update_extent_cache(inode, lbcount - *elen, pos, 1); *offset = (bcount + *elen - lbcount) >> blocksize_bits; return etype; } long udf_block_map(struct inode *inode, sector_t block) { struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; int ret; down_read(&UDF_I(inode)->i_data_sem); if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) ret = 
	      udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	up_read(&UDF_I(inode)->i_data_sem);
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}
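/*
 * Editor's note: the extent helpers above (udf_write_aext, udf_current_aext,
 * udf_insert_aext, udf_delete_aext, inode_bmap) all share one on-disk
 * convention: a 32-bit extLength word whose top two bits carry the extent
 * type and whose low 30 bits carry the byte length, hence the recurring
 * ">> 30", "& UDF_EXTENT_LENGTH_MASK", and "(etype << 30) | elen" patterns.
 * What follows is a minimal, self-contained userspace sketch of that packing,
 * added purely for illustration. It is NOT part of the driver: the sketch_*
 * names are hypothetical, and the mask constant simply mirrors the value
 * UDF_EXTENT_LENGTH_MASK is assumed to have (low 30 bits set).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_EXTENT_LENGTH_MASK 0x3fffffffU	/* low 30 bits: extent length */

/* Pack an extent type (0..3) and a byte length into one 32-bit word. */
static uint32_t sketch_pack_extent(uint32_t etype, uint32_t elen)
{
	return (etype << 30) | (elen & SKETCH_EXTENT_LENGTH_MASK);
}

/* Unpack, mirroring udf_current_aext()'s ">> 30" and mask operations. */
static void sketch_unpack_extent(uint32_t ext_length, uint32_t *etype,
				 uint32_t *elen)
{
	*etype = ext_length >> 30;
	*elen = ext_length & SKETCH_EXTENT_LENGTH_MASK;
}

int main(void)
{
	/* Type value 2 and length 4096 are arbitrary, illustrative inputs. */
	uint32_t packed = sketch_pack_extent(2, 4096);
	uint32_t etype, elen;

	sketch_unpack_extent(packed, &etype, &elen);
	printf("packed=0x%08x type=%u len=%u\n",
	       (unsigned int) packed, (unsigned int) etype,
	       (unsigned int) elen);
	return 0;
}
/*
 * End of editor's sketch. The driver code resumes with the next file in the
 * dataset below.
 */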
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS % % P P SS % % PPPP SSS % % P SS % % P SSSSS % % % % % % Read/Write Postscript Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/delegate-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/profile.h" #include "MagickCore/resource_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* Forward declarations. */ static MagickBooleanType WritePSImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v o k e P o s t s r i p t D e l e g a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InvokePostscriptDelegate() executes the Postscript interpreter with the % specified command. % % The format of the InvokePostscriptDelegate method is: % % MagickBooleanType InvokePostscriptDelegate( % const MagickBooleanType verbose,const char *command, % ExceptionInfo *exception) % % A description of each parameter follows: % % o verbose: A value other than zero displays the command prior to % executing it. % % o command: the address of a character string containing the command to % execute. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT) static int MagickDLLCall PostscriptDelegateMessage(void *handle, const char *message,int length) { char **messages; ssize_t offset; offset=0; messages=(char **) handle; if (*messages == (char *) NULL) *messages=(char *) AcquireQuantumMemory((size_t) length+1,sizeof(char *)); else { offset=(ssize_t) strlen(*messages); *messages=(char *) ResizeQuantumMemory(*messages,(size_t) offset+length+1, sizeof(char *)); } if (*messages == (char *) NULL) return(0); (void) memcpy(*messages+offset,message,(size_t) length); (*messages)[length+offset] ='\0'; return(length); } #endif static MagickBooleanType InvokePostscriptDelegate( const MagickBooleanType verbose,const char *command,char *message, ExceptionInfo *exception) { int status; #if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT) #define SetArgsStart(command,args_start) \ if (args_start == (const char *) NULL) \ { \ if (*command != '"') \ args_start=strchr(command,' '); \ else \ { \ args_start=strchr(command+1,'"'); \ if (args_start != (const char *) NULL) \ args_start++; \ } \ } #define ExecuteGhostscriptCommand(command,status) \ { \ status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \ exception); \ if (status == 0) \ return(MagickTrue); \ if (status < 0) \ return(MagickFalse); \ (void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \ "FailedToExecuteCommand","`%s' (%d)",command,status); \ return(MagickFalse); \ } char **argv, *errors; const char *args_start = (const char *) NULL; const GhostInfo *ghost_info; gs_main_instance *interpreter; gsapi_revision_t revision; int argc, code; register ssize_t i; #if defined(MAGICKCORE_WINDOWS_SUPPORT) ghost_info=NTGhostscriptDLLVectors(); #else GhostInfo ghost_info_struct; ghost_info=(&ghost_info_struct); (void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct)); ghost_info_struct.delete_instance=(void (*)(gs_main_instance *)) gsapi_delete_instance; ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit; ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *)) gsapi_new_instance; ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **)) gsapi_init_with_args; ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int, int *)) gsapi_run_string; ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *, int),int (*)(void *,const char *,int),int (*)(void *, const char *, int))) gsapi_set_stdio; ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision; #endif if (ghost_info == (GhostInfo *) NULL) ExecuteGhostscriptCommand(command,status); if ((ghost_info->revision)(&revision,(int) sizeof(revision)) != 0) revision.revision=0; if (verbose != MagickFalse) { (void) fprintf(stdout,"[ghostscript library %.2f]",(double) revision.revision/100.0); SetArgsStart(command,args_start); (void) fputs(args_start,stdout); } interpreter=(gs_main_instance *) NULL; errors=(char *) NULL; status=(ghost_info->new_instance)(&interpreter,(void *) &errors); if (status < 0) ExecuteGhostscriptCommand(command,status); code=0; argv=StringToArgv(command,&argc); if (argv == (char **) NULL) { (ghost_info->delete_instance)(interpreter); return(MagickFalse); } (void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *, char *,int)) NULL,PostscriptDelegateMessage,PostscriptDelegateMessage); status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1); if (status == 0) 
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n", 0,&code); (ghost_info->exit)(interpreter); (ghost_info->delete_instance)(interpreter); for (i=0; i < (ssize_t) argc; i++) argv[i]=DestroyString(argv[i]); argv=(char **) RelinquishMagickMemory(argv); if (status != 0) { SetArgsStart(command,args_start); if (status == -101) /* quit */ (void) FormatLocaleString(message,MagickPathExtent, "[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0, args_start,errors); else { (void) ThrowMagickException(exception,GetMagickModule(), DelegateError,"PostscriptDelegateFailed", "`[ghostscript library %.2f]%s': %s",(double) revision.revision/ 100.0,args_start,errors); if (errors != (char *) NULL) errors=DestroyString(errors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Ghostscript returns status %d, exit code %d",status,code); return(MagickFalse); } } if (errors != (char *) NULL) errors=DestroyString(errors); return(MagickTrue); #else status=ExternalDelegateCommand(MagickFalse,verbose,command,message,exception); return(status == 0 ? MagickTrue : MagickFalse); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPS() returns MagickTrue if the image format type, identified by the % magick string, is PS. % % The format of the IsPS method is: % % MagickBooleanType IsPS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPS(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"%!",2) == 0) return(MagickTrue); if (memcmp(magick,"\004%!",3) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSImage() reads a Postscript image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer % to the new image. % % The format of the ReadPSImage method is: % % Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType IsPostscriptRendered(const char *path) { MagickBooleanType status; struct stat attributes; if ((path == (const char *) NULL) || (*path == '\0')) return(MagickFalse); status=GetPathAttributes(path,&attributes); if ((status != MagickFalse) && S_ISREG(attributes.st_mode) && (attributes.st_size > 0)) return(MagickTrue); return(MagickFalse); } static inline int ProfileInteger(Image *image,short int *hex_digits) { int c, l, value; register ssize_t i; l=0; value=0; for (i=0; i < 2; ) { c=ReadBlobByte(image); if ((c == EOF) || ((c == '%') && (l == '%'))) { value=(-1); break; } l=c; c&=0xff; if (isxdigit(c) == MagickFalse) continue; value=(int) ((size_t) value << 4)+hex_digits[c]; i++; } return(value); } static Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define BoundingBox "BoundingBox:" #define BeginDocument "BeginDocument:" #define BeginXMPPacket "<?xpacket begin=" #define EndXMPPacket "<?xpacket end=" #define ICCProfile "BeginICCProfile:" #define CMYKCustomColor "CMYKCustomColor:" #define CMYKProcessColor "CMYKProcessColor:" #define DocumentMedia "DocumentMedia:" #define DocumentCustomColors "DocumentCustomColors:" #define DocumentProcessColors "DocumentProcessColors:" #define EndDocument "EndDocument:" #define HiResBoundingBox "HiResBoundingBox:" #define ImageData "ImageData:" #define PageBoundingBox "PageBoundingBox:" #define LanguageLevel "LanguageLevel:" #define PageMedia "PageMedia:" #define Pages "Pages:" #define PhotoshopProfile "BeginPhotoshop:" #define PostscriptLevel "!PS-" #define RenderPostscriptText " Rendering Postscript... " #define SpotColor "+ " char command[MagickPathExtent], *density, filename[MagickPathExtent], geometry[MagickPathExtent], input_filename[MagickPathExtent], message[MagickPathExtent], *options, postscript_filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; GeometryInfo geometry_info; Image *image, *next, *postscript_image; ImageInfo *read_info; int c, file; MagickBooleanType cmyk, fitPage, skip, status; MagickStatusType flags; PointInfo delta, resolution; RectangleInfo page; register char *p; register ssize_t i; SegmentInfo bounds, hires_bounds; short int hex_digits[256]; size_t length; ssize_t count, priority; StringInfo *profile; unsigned long columns, extent, language_level, pages, rows, scene, spotcolor; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Initialize hex values. 
*/ (void) memset(hex_digits,0,sizeof(hex_digits)); hex_digits[(int) '0']=0; hex_digits[(int) '1']=1; hex_digits[(int) '2']=2; hex_digits[(int) '3']=3; hex_digits[(int) '4']=4; hex_digits[(int) '5']=5; hex_digits[(int) '6']=6; hex_digits[(int) '7']=7; hex_digits[(int) '8']=8; hex_digits[(int) '9']=9; hex_digits[(int) 'a']=10; hex_digits[(int) 'b']=11; hex_digits[(int) 'c']=12; hex_digits[(int) 'd']=13; hex_digits[(int) 'e']=14; hex_digits[(int) 'f']=15; hex_digits[(int) 'A']=10; hex_digits[(int) 'B']=11; hex_digits[(int) 'C']=12; hex_digits[(int) 'D']=13; hex_digits[(int) 'E']=14; hex_digits[(int) 'F']=15; /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0)) { flags=ParseGeometry(PSDensityGeometry,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } if (image_info->density != (char *) NULL) { flags=ParseGeometry(image_info->density,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); resolution=image->resolution; page.width=(size_t) ceil((double) (page.width*resolution.x/delta.x)-0.5); page.height=(size_t) ceil((double) (page.height*resolution.y/delta.y)-0.5); /* Determine page geometry from the Postscript bounding box. */ (void) memset(&bounds,0,sizeof(bounds)); (void) memset(command,0,sizeof(command)); cmyk=image_info->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; (void) memset(&hires_bounds,0,sizeof(hires_bounds)); columns=0; rows=0; priority=0; rows=0; extent=0; spotcolor=0; language_level=1; pages=(~0UL); skip=MagickFalse; p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { /* Note document structuring comments. */ *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Skip %%BeginDocument thru %%EndDocument. */ if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0) skip=MagickTrue; if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0) skip=MagickFalse; if (skip != MagickFalse) continue; if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0) { (void) SetImageProperty(image,"ps:Level",command+4,exception); if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse) pages=1; } if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0) (void) sscanf(command,LanguageLevel " %lu",&language_level); if (LocaleNCompare(Pages,command,strlen(Pages)) == 0) (void) sscanf(command,Pages " %lu",&pages); if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0) (void) sscanf(command,ImageData " %lu %lu",&columns,&rows); /* Is this a CMYK document? 
*/ length=strlen(DocumentProcessColors); if (LocaleNCompare(DocumentProcessColors,command,length) == 0) { if ((GlobExpression(command,"*Cyan*",MagickTrue) != MagickFalse) || (GlobExpression(command,"*Magenta*",MagickTrue) != MagickFalse) || (GlobExpression(command,"*Yellow*",MagickTrue) != MagickFalse)) cmyk=MagickTrue; } if (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CMYKProcessColor,command,strlen(CMYKProcessColor)) == 0) cmyk=MagickTrue; length=strlen(DocumentCustomColors); if ((LocaleNCompare(DocumentCustomColors,command,length) == 0) || (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) || (LocaleNCompare(SpotColor,command,strlen(SpotColor)) == 0)) { char property[MagickPathExtent], *value; register char *q; /* Note spot names. */ (void) FormatLocaleString(property,MagickPathExtent, "ps:SpotColor-%.20g",(double) (spotcolor++)); for (q=command; *q != '\0'; q++) if (isspace((int) (unsigned char) *q) != 0) break; value=ConstantString(q); (void) SubstituteString(&value,"(",""); (void) SubstituteString(&value,")",""); (void) StripString(value); if (*value != '\0') (void) SetImageProperty(image,property,value,exception); value=DestroyString(value); continue; } if (image_info->page != (char *) NULL) continue; /* Note region defined by bounding box. */ count=0; i=0; if (LocaleNCompare(BoundingBox,command,strlen(BoundingBox)) == 0) { count=(ssize_t) sscanf(command,BoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=2; } if (LocaleNCompare(DocumentMedia,command,strlen(DocumentMedia)) == 0) { count=(ssize_t) sscanf(command,DocumentMedia " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if (LocaleNCompare(HiResBoundingBox,command,strlen(HiResBoundingBox)) == 0) { count=(ssize_t) sscanf(command,HiResBoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=3; } if (LocaleNCompare(PageBoundingBox,command,strlen(PageBoundingBox)) == 0) { count=(ssize_t) sscanf(command,PageBoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if (LocaleNCompare(PageMedia,command,strlen(PageMedia)) == 0) { count=(ssize_t) sscanf(command,PageMedia " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if ((count != 4) || (i < (ssize_t) priority)) continue; if ((fabs(bounds.x2-bounds.x1) <= fabs(hires_bounds.x2-hires_bounds.x1)) || (fabs(bounds.y2-bounds.y1) <= fabs(hires_bounds.y2-hires_bounds.y1))) if (i == (ssize_t) priority) continue; hires_bounds=bounds; priority=i; } if ((fabs(hires_bounds.x2-hires_bounds.x1) >= MagickEpsilon) && (fabs(hires_bounds.y2-hires_bounds.y1) >= MagickEpsilon)) { /* Set Postscript render geometry. 
*/ (void) FormatLocaleString(geometry,MagickPathExtent,"%gx%g%+.15g%+.15g", hires_bounds.x2-hires_bounds.x1,hires_bounds.y2-hires_bounds.y1, hires_bounds.x1,hires_bounds.y1); (void) SetImageProperty(image,"ps:HiResBoundingBox",geometry,exception); page.width=(size_t) ceil((double) ((hires_bounds.x2-hires_bounds.x1)* resolution.x/delta.x)-0.5); page.height=(size_t) ceil((double) ((hires_bounds.y2-hires_bounds.y1)* resolution.y/delta.y)-0.5); } fitPage=MagickFalse; option=GetImageOption(image_info,"eps:fit-page"); if (option != (char *) NULL) { char *page_geometry; page_geometry=GetPageGeometry(option); flags=ParseMetaGeometry(page_geometry,&page.x,&page.y,&page.width, &page.height); if (flags == NoValue) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidGeometry","`%s'",option); image=DestroyImage(image); return((Image *) NULL); } page.width=(size_t) ceil((double) (page.width*image->resolution.x/delta.x) -0.5); page.height=(size_t) ceil((double) (page.height*image->resolution.y/ delta.y) -0.5); page_geometry=DestroyString(page_geometry); fitPage=MagickTrue; } if (IssRGBCompatibleColorspace(image_info->colorspace) != MagickFalse) cmyk=MagickFalse; /* Create Ghostscript control file. */ file=AcquireUniqueFileResource(postscript_filename); if (file == -1) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } (void) CopyMagickString(command,"/setpagedevice {pop} bind 1 index where {" "dup wcheck {3 1 roll put} {pop def} ifelse} {def} ifelse\n" "<</UseCIEColor true>>setpagedevice\n",MagickPathExtent); count=write(file,command,(unsigned int) strlen(command)); if (image_info->page == (char *) NULL) { char translate_geometry[MagickPathExtent]; (void) FormatLocaleString(translate_geometry,MagickPathExtent, "%g %g translate\n",-bounds.x1,-bounds.y1); count=write(file,translate_geometry,(unsigned int) strlen(translate_geometry)); } file=close(file)-1; /* Render Postscript with the Ghostscript delegate. 
*/ if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("ps:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("ps:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("ps:alpha",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) RelinquishUniqueFileResource(postscript_filename); image=DestroyImageList(image); return((Image *) NULL); } density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MagickPathExtent,"%gx%g",resolution.x, resolution.y); (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { char pages[MagickPathExtent]; (void) FormatLocaleString(pages,MagickPathExtent,"-dFirstPage=%.20g " "-dLastPage=%.20g ",(double) read_info->scene+1,(double) (read_info->scene+read_info->number_scenes)); (void) ConcatenateMagickString(options,pages,MagickPathExtent); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } if (*image_info->magick == 'E') { option=GetImageOption(image_info,"eps:use-cropbox"); if ((option == (const char *) NULL) || (IsStringTrue(option) != MagickFalse)) (void) ConcatenateMagickString(options,"-dEPSCrop ",MagickPathExtent); if (fitPage != MagickFalse) (void) ConcatenateMagickString(options,"-dEPSFitPage ", MagickPathExtent); } (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) AcquireUniqueFilename(filename); (void) RelinquishUniqueFileResource(filename); (void) ConcatenateMagickString(filename,"%d",MagickPathExtent); (void) FormatLocaleString(command,MagickPathExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 
4 : 1,density,options,filename, postscript_filename,input_filename); options=DestroyString(options); density=DestroyString(density); *message='\0'; status=InvokePostscriptDelegate(read_info->verbose,command,message,exception); (void) InterpretImageFilename(image_info,image,filename,1, read_info->filename,exception); if ((status == MagickFalse) || (IsPostscriptRendered(read_info->filename) == MagickFalse)) { (void) ConcatenateMagickString(command," -c showpage",MagickPathExtent); status=InvokePostscriptDelegate(read_info->verbose,command,message, exception); } (void) RelinquishUniqueFileResource(postscript_filename); (void) RelinquishUniqueFileResource(input_filename); postscript_image=(Image *) NULL; if (status == MagickFalse) for (i=1; ; i++) { (void) InterpretImageFilename(image_info,image,filename,(int) i, read_info->filename,exception); if (IsPostscriptRendered(read_info->filename) == MagickFalse) break; (void) RelinquishUniqueFileResource(read_info->filename); } else for (i=1; ; i++) { (void) InterpretImageFilename(image_info,image,filename,(int) i, read_info->filename,exception); if (IsPostscriptRendered(read_info->filename) == MagickFalse) break; read_info->blob=NULL; read_info->length=0; next=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); if (next == (Image *) NULL) break; AppendImageToList(&postscript_image,next); } (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); if (postscript_image == (Image *) NULL) { if (*message != '\0') (void) ThrowMagickException(exception,GetMagickModule(), DelegateError,"PostscriptDelegateFailed","`%s'",message); image=DestroyImageList(image); return((Image *) NULL); } if (LocaleCompare(postscript_image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(postscript_image,exception); if (cmyk_image != (Image *) NULL) { postscript_image=DestroyImageList(postscript_image); postscript_image=cmyk_image; } } (void) SeekBlob(image,0,SEEK_SET); for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { /* Note document structuring comments. */ *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Skip %%BeginDocument thru %%EndDocument. */ if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0) skip=MagickTrue; if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0) skip=MagickFalse; if (skip != MagickFalse) continue; if (LocaleNCompare(ICCProfile,command,strlen(ICCProfile)) == 0) { unsigned char *datum; /* Read ICC profile. */ profile=AcquireStringInfo(MagickPathExtent); datum=GetStringInfoDatum(profile); for (i=0; (c=ProfileInteger(image,hex_digits)) != EOF; i++) { if (i >= (ssize_t) GetStringInfoLength(profile)) { SetStringInfoLength(profile,(size_t) i << 1); datum=GetStringInfoDatum(profile); } datum[i]=(unsigned char) c; } SetStringInfoLength(profile,(size_t) i+1); (void) SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); continue; } if (LocaleNCompare(PhotoshopProfile,command,strlen(PhotoshopProfile)) == 0) { unsigned char *q; /* Read Photoshop profile. 
*/ count=(ssize_t) sscanf(command,PhotoshopProfile " %lu",&extent); if (count != 1) continue; length=extent; if ((MagickSizeType) length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); profile=BlobToStringInfo((const void *) NULL,length); if (profile != (StringInfo *) NULL) { q=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) length; i++) *q++=(unsigned char) ProfileInteger(image,hex_digits); (void) SetImageProfile(image,"8bim",profile,exception); profile=DestroyStringInfo(profile); } continue; } if (LocaleNCompare(BeginXMPPacket,command,strlen(BeginXMPPacket)) == 0) { /* Read XMP profile. */ p=command; profile=StringToStringInfo(command); for (i=(ssize_t) GetStringInfoLength(profile)-1; c != EOF; i++) { SetStringInfoLength(profile,(size_t) (i+1)); c=ReadBlobByte(image); GetStringInfoDatum(profile)[i]=(unsigned char) c; *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; if (LocaleNCompare(EndXMPPacket,command,strlen(EndXMPPacket)) == 0) break; } SetStringInfoLength(profile,(size_t) i); (void) SetImageProfile(image,"xmp",profile,exception); profile=DestroyStringInfo(profile); continue; } } (void) CloseBlob(image); if (image_info->number_scenes != 0) { Image *clone_image; /* Add place holder images to meet the subimage specification requirement. */ for (i=0; i < (ssize_t) image_info->scene; i++) { clone_image=CloneImage(postscript_image,1,1,MagickTrue,exception); if (clone_image != (Image *) NULL) PrependImageToList(&postscript_image,clone_image); } } do { (void) CopyMagickString(postscript_image->filename,filename, MagickPathExtent); (void) CopyMagickString(postscript_image->magick,image->magick, MagickPathExtent); if (columns != 0) postscript_image->magick_columns=columns; if (rows != 0) postscript_image->magick_rows=rows; postscript_image->page=page; (void) CloneImageProfiles(postscript_image,image); (void) CloneImageProperties(postscript_image,image); next=SyncNextImageInList(postscript_image); if (next != (Image *) NULL) postscript_image=next; } while (next != (Image *) NULL); image=DestroyImageList(image); scene=0; for (next=GetFirstImageInList(postscript_image); next != (Image *) NULL; ) { next->scene=scene++; next=GetNextImageInList(next); } return(GetFirstImageInList(postscript_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSImage() adds properties for the PS image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPSImage method is: % % size_t RegisterPSImage(void) % */ ModuleExport size_t RegisterPSImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PS","EPI", "Encapsulated PostScript Interchange format"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPS","Encapsulated PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPSF","Encapsulated PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPSI", "Encapsulated PostScript Interchange format"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","PS","PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->mime_type=ConstantString("application/postscript"); entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderBlobSupportFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSImage() removes format registrations made by the % PS module from the list of supported formats. % % The format of the UnregisterPSImage method is: % % UnregisterPSImage(void) % */ ModuleExport void UnregisterPSImage(void) { (void) UnregisterMagickInfo("EPI"); (void) UnregisterMagickInfo("EPS"); (void) UnregisterMagickInfo("EPSF"); (void) UnregisterMagickInfo("EPSI"); (void) UnregisterMagickInfo("PS"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSImage translates an image to encapsulated Postscript % Level I for printing. If the supplied geometry is null, the image is % centered on the Postscript page. Otherwise, the image is positioned as % specified by the geometry. 
% % The format of the WritePSImage method is: % % MagickBooleanType WritePSImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline unsigned char *PopHexPixel(const char hex_digits[][3], const size_t pixel,unsigned char *pixels) { register const char *hex; hex=hex_digits[pixel]; *pixels++=(unsigned char) (*hex++ & 0xff); *pixels++=(unsigned char) (*hex & 0xff); return(pixels); } static MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { #define WriteRunlengthPacket(image,pixel,length,p) \ { \ if ((image->alpha_trait != UndefinedPixelTrait) && (length != 0) && \ (GetPixelAlpha(image,p) == (Quantum) TransparentAlpha)) \ { \ q=PopHexPixel(hex_digits,0xff,q); \ q=PopHexPixel(hex_digits,0xff,q); \ q=PopHexPixel(hex_digits,0xff,q); \ } \ else \ { \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.red)),q); \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.green)),q); \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.blue)),q); \ } \ q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); \ } static const char hex_digits[][3] = { "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0A", "0B", "0C", "0D", "0E", "0F", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1A", "1B", "1C", "1D", "1E", "1F", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2A", "2B", "2C", "2D", "2E", "2F", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3A", "3B", "3C", "3D", "3E", "3F", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4A", "4B", "4C", "4D", "4E", "4F", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5A", "5B", "5C", "5D", "5E", "5F", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6A", "6B", "6C", "6D", "6E", "6F", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "7A", "7B", "7C", "7D", "7E", "7F", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8A", "8B", "8C", "8D", "8E", "8F", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9A", "9B", "9C", "9D", "9E", "9F", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF", "B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF", "C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "CA", "CB", "CC", "CD", "CE", "CF", "D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF", "E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF", "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "FA", "FB", "FC", "FD", "FE", "FF" }, PostscriptProlog[] = "%%BeginProlog\n" "%\n" "% Display a color image. 
The image is displayed in color on\n" "% Postscript viewers or printers that support color, otherwise\n" "% it is displayed as grayscale.\n" "%\n" "/DirectClassPacket\n" "{\n" " %\n" " % Get a DirectClass packet.\n" " %\n" " % Parameters:\n" " % red.\n" " % green.\n" " % blue.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile color_packet readhexstring pop pop\n" " compression 0 eq\n" " {\n" " /number_pixels 3 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add 3 mul def\n" " } ifelse\n" " 0 3 number_pixels 1 sub\n" " {\n" " pixels exch color_packet putinterval\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/DirectClassImage\n" "{\n" " %\n" " % Display a DirectClass image.\n" " %\n" " systemdict /colorimage known\n" " {\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { DirectClassPacket } false 3 colorimage\n" " }\n" " {\n" " %\n" " % No colorimage operator; convert to grayscale.\n" " %\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { GrayDirectClassPacket } image\n" " } ifelse\n" "} bind def\n" "\n" "/GrayDirectClassPacket\n" "{\n" " %\n" " % Get a DirectClass packet; convert to grayscale.\n" " %\n" " % Parameters:\n" " % red\n" " % green\n" " % blue\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile color_packet readhexstring pop pop\n" " color_packet 0 get 0.299 mul\n" " color_packet 1 get 0.587 mul add\n" " color_packet 2 get 0.114 mul add\n" " cvi\n" " /gray_packet exch def\n" " compression 0 eq\n" " {\n" " /number_pixels 1 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add def\n" " } ifelse\n" " 0 1 number_pixels 1 sub\n" " {\n" " pixels exch gray_packet put\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/GrayPseudoClassPacket\n" "{\n" " %\n" " % Get a PseudoClass packet; convert to grayscale.\n" " %\n" " % Parameters:\n" " % index: index into the colormap.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile byte readhexstring pop 0 get\n" " /offset exch 3 mul def\n" " /color_packet colormap offset 3 getinterval def\n" " color_packet 0 get 0.299 mul\n" " color_packet 1 get 0.587 mul add\n" " color_packet 2 get 0.114 mul add\n" " cvi\n" " /gray_packet exch def\n" " compression 0 eq\n" " {\n" " /number_pixels 1 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add def\n" " } ifelse\n" " 0 1 number_pixels 1 sub\n" " {\n" " pixels exch gray_packet put\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/PseudoClassPacket\n" "{\n" " %\n" " % Get a PseudoClass packet.\n" " %\n" " % Parameters:\n" " % index: index into the colormap.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile byte readhexstring pop 0 get\n" " /offset exch 3 mul def\n" " /color_packet colormap offset 3 getinterval def\n" " compression 0 eq\n" " {\n" " /number_pixels 3 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add 3 mul def\n" " } ifelse\n" " 0 3 number_pixels 1 sub\n" " {\n" " pixels exch color_packet putinterval\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/PseudoClassImage\n" "{\n" " %\n" " % Display a PseudoClass image.\n" " %\n" " % Parameters:\n" " % class: 0-PseudoClass or 1-Grayscale.\n" " %\n" " currentfile 
buffer readline pop\n" " token pop /class exch def pop\n" " class 0 gt\n" " {\n" " currentfile buffer readline pop\n" " token pop /depth exch def pop\n" " /grays columns 8 add depth sub depth mul 8 idiv string def\n" " columns rows depth\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { currentfile grays readhexstring pop } image\n" " }\n" " {\n" " %\n" " % Parameters:\n" " % colors: number of colors in the colormap.\n" " % colormap: red, green, blue color packets.\n" " %\n" " currentfile buffer readline pop\n" " token pop /colors exch def pop\n" " /colors colors 3 mul def\n" " /colormap colors string def\n" " currentfile colormap readhexstring pop pop\n" " systemdict /colorimage known\n" " {\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { PseudoClassPacket } false 3 colorimage\n" " }\n" " {\n" " %\n" " % No colorimage operator; convert to grayscale.\n" " %\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { GrayPseudoClassPacket } image\n" " } ifelse\n" " } ifelse\n" "} bind def\n" "\n" "/DisplayImage\n" "{\n" " %\n" " % Display a DirectClass or PseudoClass image.\n" " %\n" " % Parameters:\n" " % x & y translation.\n" " % x & y scale.\n" " % label pointsize.\n" " % image label.\n" " % image columns & rows.\n" " % class: 0-DirectClass or 1-PseudoClass.\n" " % compression: 0-none or 1-RunlengthEncoded.\n" " % hex color packets.\n" " %\n" " gsave\n" " /buffer 512 string def\n" " /byte 1 string def\n" " /color_packet 3 string def\n" " /pixels 768 string def\n" "\n" " currentfile buffer readline pop\n" " token pop /x exch def\n" " token pop /y exch def pop\n" " x y translate\n" " currentfile buffer readline pop\n" " token pop /x exch def\n" " token pop /y exch def pop\n" " currentfile buffer readline pop\n" " token pop /pointsize exch def pop\n", PostscriptEpilog[] = " x y scale\n" " currentfile buffer readline pop\n" " token pop /columns exch def\n" " token pop /rows exch def pop\n" " currentfile buffer readline pop\n" " token pop /class exch def pop\n" " currentfile buffer readline pop\n" " token pop /compression exch def pop\n" " class 0 gt { PseudoClassImage } { DirectClassImage } ifelse\n" " grestore\n"; char buffer[MagickPathExtent], date[MagickPathExtent], **labels, page_geometry[MagickPathExtent]; CompressionType compression; const char *value; const StringInfo *profile; double pointsize; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType scene; MagickStatusType flags; PixelInfo pixel; PointInfo delta, resolution, scale; Quantum index; RectangleInfo geometry, media_info, page_info; register const Quantum *p; register ssize_t i, x; register unsigned char *q; SegmentInfo bounds; size_t bit, byte, imageListLength, length, page, text_size; ssize_t j, y; time_t timer; unsigned char pixels[2048]; /* Open output image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) memset(&bounds,0,sizeof(bounds)); compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; page=1; scene=0; imageListLength=GetImageListLength(image); do { /* Scale relative to dots-per-inch. */ (void) TransformImageColorspace(image,sRGBColorspace,exception); delta.x=DefaultResolution; delta.y=DefaultResolution; resolution.x=image->resolution.x; resolution.y=image->resolution.y; if ((resolution.x == 0.0) || (resolution.y == 0.0)) { flags=ParseGeometry(PSDensityGeometry,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) resolution.y=resolution.x; } if (image_info->density != (char *) NULL) { flags=ParseGeometry(image_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) resolution.y=resolution.x; } if (image->units == PixelsPerCentimeterResolution) { resolution.x=(double) ((size_t) (100.0*2.54*resolution.x+0.5)/100.0); resolution.y=(double) ((size_t) (100.0*2.54*resolution.y+0.5)/100.0); } SetGeometry(image,&geometry); (void) FormatLocaleString(page_geometry,MagickPathExtent,"%.20gx%.20g", (double) image->columns,(double) image->rows); if (image_info->page != (char *) NULL) (void) CopyMagickString(page_geometry,image_info->page,MagickPathExtent); else if ((image->page.width != 0) && (image->page.height != 0)) (void) FormatLocaleString(page_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double) image->page.height,(double) image->page.x,(double) image->page.y); else if ((image->gravity != UndefinedGravity) && (LocaleCompare(image_info->magick,"PS") == 0)) (void) CopyMagickString(page_geometry,PSPageGeometry, MagickPathExtent); (void) ConcatenateMagickString(page_geometry,">",MagickPathExtent); (void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); scale.x=PerceptibleReciprocal(resolution.x)*geometry.width*delta.x; geometry.width=(size_t) floor(scale.x+0.5); scale.y=PerceptibleReciprocal(resolution.y)*geometry.height*delta.y; geometry.height=(size_t) floor(scale.y+0.5); (void) ParseAbsoluteGeometry(page_geometry,&media_info); (void) ParseGravityGeometry(image,page_geometry,&page_info,exception); if (image->gravity != UndefinedGravity) { geometry.x=(-page_info.x); geometry.y=(ssize_t) (media_info.height+page_info.y-image->rows); } pointsize=12.0; if (image_info->pointsize != 0.0) pointsize=image_info->pointsize; text_size=0; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) text_size=(size_t) (MultilineCensus(value)*pointsize+12); if (page == 1) { /* Output Postscript header. 
*/ if (LocaleCompare(image_info->magick,"PS") == 0) (void) CopyMagickString(buffer,"%!PS-Adobe-3.0\n",MagickPathExtent); else (void) CopyMagickString(buffer,"%!PS-Adobe-3.0 EPSF-3.0\n", MagickPathExtent); (void) WriteBlobString(image,buffer); (void) WriteBlobString(image,"%%Creator: (ImageMagick)\n"); (void) FormatLocaleString(buffer,MagickPathExtent,"%%%%Title: (%s)\n", image->filename); (void) WriteBlobString(image,buffer); timer=GetMagickTime(); (void) FormatMagickTime(timer,MagickPathExtent,date); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%CreationDate: (%s)\n",date); (void) WriteBlobString(image,buffer); bounds.x1=(double) geometry.x; bounds.y1=(double) geometry.y; bounds.x2=(double) geometry.x+scale.x; bounds.y2=(double) geometry.y+(geometry.height+text_size); if ((image_info->adjoin != MagickFalse) && (GetNextImageInList(image) != (Image *) NULL)) (void) CopyMagickString(buffer,"%%%%BoundingBox: (atend)\n", MagickPathExtent); else { (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5), ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1, bounds.y1,bounds.x2,bounds.y2); } (void) WriteBlobString(image,buffer); profile=GetImageProfile(image,"8bim"); if (profile != (StringInfo *) NULL) { /* Embed Photoshop profile. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%%BeginPhotoshop: %.20g",(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) { if ((i % 32) == 0) (void) WriteBlobString(image,"\n% "); (void) FormatLocaleString(buffer,MagickPathExtent,"%02X", (unsigned int) (GetStringInfoDatum(profile)[i] & 0xff)); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"\n%EndPhotoshop\n"); } profile=GetImageProfile(image,"xmp"); DisableMSCWarning(4127) if (0 && (profile != (StringInfo *) NULL)) RestoreMSCWarning { /* Embed XML profile. */ (void) WriteBlobString(image,"\n%begin_xml_code\n"); (void) FormatLocaleString(buffer,MagickPathExtent, "\n%%begin_xml_packet: %.20g\n",(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) (void) WriteBlobByte(image,GetStringInfoDatum(profile)[i]); (void) WriteBlobString(image,"\n%end_xml_packet\n%end_xml_code\n"); } value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) WriteBlobString(image, "%%DocumentNeededResources: font Times-Roman\n"); (void) WriteBlobString(image,"%%DocumentData: Clean7Bit\n"); (void) WriteBlobString(image,"%%LanguageLevel: 1\n"); if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"%%Pages: 1\n"); else { /* Compute the number of pages. */ (void) WriteBlobString(image,"%%Orientation: Portrait\n"); (void) WriteBlobString(image,"%%PageOrder: Ascend\n"); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%Pages: %.20g\n",image_info->adjoin != MagickFalse ? 
(double) imageListLength : 1.0); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"%%EndComments\n"); (void) WriteBlobString(image,"\n%%BeginDefaults\n"); (void) WriteBlobString(image,"%%EndDefaults\n\n"); if ((LocaleCompare(image_info->magick,"EPI") == 0) || (LocaleCompare(image_info->magick,"EPSI") == 0) || (LocaleCompare(image_info->magick,"EPT") == 0)) { Image *preview_image; Quantum pixel; register ssize_t x; ssize_t y; /* Create preview image. */ preview_image=CloneImage(image,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Dump image as bitmap. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BeginPreview: %.20g %.20g %.20g %.20g\n%% ",(double) preview_image->columns,(double) preview_image->rows,1.0, (double) ((((preview_image->columns+7) >> 3)*preview_image->rows+ 35)/36)); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(preview_image,0,y,preview_image->columns,1, exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) preview_image->columns; x++) { byte<<=1; pixel=ClampToQuantum(GetPixelLuma(preview_image,p)); if (pixel >= (Quantum) (QuantumRange/2)) byte|=0x01; bit++; if (bit == 8) { q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; (void) WriteBlobString(image,"% "); }; bit=0; byte=0; } } if (bit != 0) { byte<<=(8-bit); q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; (void) WriteBlobString(image,"% "); }; }; } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } (void) WriteBlobString(image,"\n%%EndPreview\n"); preview_image=DestroyImage(preview_image); } /* Output Postscript commands. 
*/ (void) WriteBlob(image,sizeof(PostscriptProlog)-1, (const unsigned char *) PostscriptProlog); value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) { (void) WriteBlobString(image, " /Times-Roman findfont pointsize scalefont setfont\n"); for (j=(ssize_t) MultilineCensus(value)-1; j >= 0; j--) { (void) WriteBlobString(image," /label 512 string def\n"); (void) WriteBlobString(image, " currentfile label readline pop\n"); (void) FormatLocaleString(buffer,MagickPathExtent, " 0 y %g add moveto label show pop\n",j*pointsize+12); (void) WriteBlobString(image,buffer); } } (void) WriteBlob(image,sizeof(PostscriptEpilog)-1, (const unsigned char *) PostscriptEpilog); if (LocaleCompare(image_info->magick,"PS") == 0) (void) WriteBlobString(image," showpage\n"); (void) WriteBlobString(image,"} bind def\n"); (void) WriteBlobString(image,"%%EndProlog\n"); } (void) FormatLocaleString(buffer,MagickPathExtent,"%%%%Page: 1 %.20g\n", (double) (page++)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%PageBoundingBox: %.20g %.20g %.20g %.20g\n",(double) geometry.x, (double) geometry.y,geometry.x+(double) geometry.width,geometry.y+(double) (geometry.height+text_size)); (void) WriteBlobString(image,buffer); if ((double) geometry.x < bounds.x1) bounds.x1=(double) geometry.x; if ((double) geometry.y < bounds.y1) bounds.y1=(double) geometry.y; if ((double) (geometry.x+geometry.width-1) > bounds.x2) bounds.x2=(double) geometry.x+geometry.width-1; if ((double) (geometry.y+(geometry.height+text_size)-1) > bounds.y2) bounds.y2=(double) geometry.y+(geometry.height+text_size)-1; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) WriteBlobString(image,"%%%%PageResources: font Times-Roman\n"); if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"userdict begin\n"); (void) WriteBlobString(image,"DisplayImage\n"); /* Output image data. */ (void) FormatLocaleString(buffer,MagickPathExtent,"%.20g %.20g\n%g %g\n%g\n", (double) geometry.x,(double) geometry.y,scale.x,scale.y,pointsize); (void) WriteBlobString(image,buffer); labels=(char **) NULL; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) labels=StringToList(value); if (labels != (char **) NULL) { for (i=0; labels[i] != (char *) NULL; i++) { (void) FormatLocaleString(buffer,MagickPathExtent,"%s \n", labels[i]); (void) WriteBlobString(image,buffer); labels[i]=DestroyString(labels[i]); } labels=(char **) RelinquishMagickMemory(labels); } (void) memset(&pixel,0,sizeof(pixel)); pixel.alpha=(MagickRealType) TransparentAlpha; index=(Quantum) 0; x=0; if ((image_info->type != TrueColorType) && (SetImageGray(image,exception) != MagickFalse)) { if (SetImageMonochrome(image,exception) == MagickFalse) { Quantum pixel; /* Dump image as grayscale. 
*/ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n1\n1\n1\n8\n",(double) image->columns,(double) image->rows); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(Quantum) ScaleQuantumToChar(ClampToQuantum(GetPixelLuma( image,p))); q=PopHexPixel(hex_digits,(size_t) pixel,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } } else { ssize_t y; Quantum pixel; /* Dump image as bitmap. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n1\n1\n1\n1\n",(double) image->columns,(double) image->rows); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; pixel=ClampToQuantum(GetPixelLuma(image,p)); if (pixel >= (Quantum) (QuantumRange/2)) byte|=0x01; bit++; if (bit == 8) { q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+2) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; }; bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+2) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } }; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } } } else if ((image->storage_class == DirectClass) || (image->colors > 256) || (image->alpha_trait != UndefinedPixelTrait)) { /* Dump DirectClass image. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n0\n%d\n",(double) image->columns,(double) image->rows, compression == RLECompression ? 1 : 0); (void) WriteBlobString(image,buffer); switch (compression) { case RLECompression: { /* Dump runlength-encoded DirectColor packets. 
*/ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; GetPixelInfoPixel(image,p,&pixel); length=255; for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(image,p) == ClampToQuantum(pixel.red)) && (GetPixelGreen(image,p) == ClampToQuantum(pixel.green)) && (GetPixelBlue(image,p) == ClampToQuantum(pixel.blue)) && (GetPixelAlpha(image,p) == ClampToQuantum(pixel.alpha)) && (length < 255) && (x < (ssize_t) (image->columns-1))) length++; else { if (x > 0) { WriteRunlengthPacket(image,pixel,length,p); if ((q-pixels+10) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } } length=0; } GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); } WriteRunlengthPacket(image,pixel,length,p); if ((q-pixels+10) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } case NoCompression: default: { /* Dump uncompressed DirectColor packets. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if ((image->alpha_trait != UndefinedPixelTrait) && (GetPixelAlpha(image,p) == (Quantum) TransparentAlpha)) { q=PopHexPixel(hex_digits,0xff,q); q=PopHexPixel(hex_digits,0xff,q); q=PopHexPixel(hex_digits,0xff,q); } else { q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelRed(image,p)),q); q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelGreen(image,p)),q); q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelBlue(image,p)),q); } if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } } (void) WriteBlobByte(image,'\n'); } else { /* Dump PseudoClass image. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n%d\n%d\n0\n",(double) image->columns,(double) image->rows,image->storage_class == PseudoClass ? 1 : 0, compression == RLECompression ? 1 : 0); (void) WriteBlobString(image,buffer); /* Dump number of colors and colormap. */ (void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double) image->colors); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) image->colors; i++) { (void) FormatLocaleString(buffer,MagickPathExtent,"%02X%02X%02X\n", ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red)), ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green)), ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue))); (void) WriteBlobString(image,buffer); } switch (compression) { case RLECompression: { /* Dump runlength-encoded PseudoColor packets. 
*/ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; index=GetPixelIndex(image,p); length=255; for (x=0; x < (ssize_t) image->columns; x++) { if ((index == GetPixelIndex(image,p)) && (length < 255) && (x < ((ssize_t) image->columns-1))) length++; else { if (x > 0) { q=PopHexPixel(hex_digits,(size_t) index,q); q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); i++; if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } } length=0; } index=GetPixelIndex(image,p); pixel.red=(MagickRealType) GetPixelRed(image,p); pixel.green=(MagickRealType) GetPixelGreen(image,p); pixel.blue=(MagickRealType) GetPixelBlue(image,p); pixel.alpha=(MagickRealType) GetPixelAlpha(image,p); p+=GetPixelChannels(image); } q=PopHexPixel(hex_digits,(size_t) index,q); q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } case NoCompression: default: { /* Dump uncompressed PseudoColor packets. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { q=PopHexPixel(hex_digits,(size_t) GetPixelIndex(image,p),q); if ((q-pixels+4) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } } (void) WriteBlobByte(image,'\n'); } if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"end\n"); (void) WriteBlobString(image,"%%PageTrailer\n"); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) WriteBlobString(image,"%%Trailer\n"); if (page > 2) { (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5), ceil(bounds.y1-0.5),floor(bounds.x2-0.5),floor(bounds.y2-0.5)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,bounds.x2, bounds.y2); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"%%EOF\n"); (void) CloseBlob(image); return(MagickTrue); }
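/*
 * Editor's illustrative sketch -- not part of the coder above.  The writer
 * emits every sample as a two-digit hex pair and, when RLE compression is
 * selected, follows the value with a length byte holding "number of pixels
 * minus one" (exactly what the PostScript prolog's *Packet procedures read
 * back), wrapping the text stream at roughly 80 columns.  The stand-alone
 * helpers below mimic that packet format for one 8-bit gray scanline; the
 * names put_hex() and emit_rle_row() are invented for this example and do
 * not exist in ImageMagick.
 */
#include <stdio.h>

static const char kHex[] = "0123456789ABCDEF";

/* Append one byte as two hex digits, wrapping near 80 output columns. */
static size_t put_hex(FILE *out,unsigned char value,size_t column)
{
  (void) fputc(kHex[(value >> 4) & 0x0f],out);
  (void) fputc(kHex[value & 0x0f],out);
  column+=2;
  if (column >= 78)
    {
      (void) fputc('\n',out);
      column=0;
    }
  return(column);
}

/* Encode a gray scanline as value/length packets (length = run size - 1). */
static void emit_rle_row(FILE *out,const unsigned char *row,size_t width)
{
  size_t column = 0, run, x = 0;

  while (x < width)
  {
    run=1;
    while (((x+run) < width) && (row[x+run] == row[x]) && (run < 256))
      run++;
    column=put_hex(out,row[x],column);
    column=put_hex(out,(unsigned char) (run-1),column);
    x+=run;
  }
  (void) fputc('\n',out);
}

int main(void)
{
  const unsigned char row[] = { 0, 0, 0, 0, 255, 255, 128, 128, 128, 7 };

  emit_rle_row(stdout,row,sizeof(row));  /* prints "0003FF0180020700" */
  return(0);
}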
./CrossVul/dataset_final_sorted/CWE-399/c/bad_909_0
crossvul-cpp_data_bad_2039_0
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.55 2014/02/27 23:26:17 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const 
cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) <= CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u" " > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (pread(info->i_fd, buf, len, off) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SEC_SIZE(h); size_t pos = CDF_SEC_POS(h, id); assert(ss == len); return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) { DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", pos + len, CDF_SEC_SIZE(h) * sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return -1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: 
free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const 
cdf_dir_t *dir, cdf_stream_t *scn, const cdf_directory_t **root) { size_t i; const cdf_directory_t *d; *root = NULL; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; *root = d; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = "\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { size_t ofs = CDF_GETUINT32(p, (i << 1) + 1); q = (const uint8_t *)(const void *) ((const char *)(const void *)p + ofs - 2 * sizeof(uint32_t)); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, offs)); if (inp[i].pi_type & 
CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_FLOAT: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); u32 = CDF_TOLE4(u32); memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f)); break; case CDF_DOUBLE: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); u64 = CDF_TOLE8((uint64_t)u64); memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d)); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %" SIZE_T_FORMAT "u, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); if (l & 1) l++; o += l >> 1; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if 
(cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) { return -1; } } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6" SIZE_T_FORMAT "u: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) 
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT "u: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { char buf[26]; d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec, buf)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_FLOAT: (void)fprintf(stderr, "float [%g]\n", info[i].pi_f); break; case CDF_DOUBLE: (void)fprintf(stderr, "double [%g]\n", info[i].pi_d); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); 
(void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { char buf[26]; cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec, buf)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2039_0
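/*
 * Editor's illustrative sketch -- also separate from cdf.c above.  Before
 * dereferencing any structure found inside a stream, cdf_read_property_info()
 * calls cdf_check_stream_offset() to confirm that the candidate pointer plus
 * its tail length still lies inside the buffer backing the stream.  The
 * stand-alone helper below shows the same range check on a plain buffer;
 * in_bounds() is an invented name used only here.
 */
#include <stddef.h>
#include <stdio.h>

/* Return 1 when [p, p + need) lies completely inside [buf, buf + len). */
static int
in_bounds(const void *buf, size_t len, const void *p, size_t need)
{
	const char *b = (const char *)buf;
	const char *q = (const char *)p;

	if (q < b)			/* starts before the buffer */
		return 0;
	if ((size_t)(q - b) > len)	/* starts past the end */
		return 0;
	return need <= len - (size_t)(q - b);
}

int
main(void)
{
	char stream[64];

	(void)printf("%d\n", in_bounds(stream, sizeof(stream), stream + 60, 4));	/* 1 */
	(void)printf("%d\n", in_bounds(stream, sizeof(stream), stream + 60, 8));	/* 0 */
	return 0;
}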
crossvul-cpp_data_bad_2147_0
/* * Copyright (c) Ian F. Darwin 1986-1995. * Software written by Ian F. Darwin and others; * maintained 1995-present by Christos Zoulas and others. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * softmagic - interpret variable magic from MAGIC */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: softmagic.c,v 1.187 2014/05/13 16:42:17 christos Exp $") #endif /* lint */ #include "magic.h" #include <assert.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <time.h> #if defined(HAVE_LOCALE_H) #include <locale.h> #endif private int match(struct magic_set *, struct magic *, uint32_t, const unsigned char *, size_t, size_t, int, int, int, int, int *, int *, int *); private int mget(struct magic_set *, const unsigned char *, struct magic *, size_t, size_t, unsigned int, int, int, int, int, int *, int *, int *); private int magiccheck(struct magic_set *, struct magic *); private int32_t mprint(struct magic_set *, struct magic *); private int32_t moffset(struct magic_set *, struct magic *); private void mdebug(uint32_t, const char *, size_t); private int mcopy(struct magic_set *, union VALUETYPE *, int, int, const unsigned char *, uint32_t, size_t, size_t); private int mconvert(struct magic_set *, struct magic *, int); private int print_sep(struct magic_set *, int); private int handle_annotation(struct magic_set *, struct magic *); private void cvt_8(union VALUETYPE *, const struct magic *); private void cvt_16(union VALUETYPE *, const struct magic *); private void cvt_32(union VALUETYPE *, const struct magic *); private void cvt_64(union VALUETYPE *, const struct magic *); #define OFFSET_OOB(n, o, i) ((n) < (o) || (i) > ((n) - (o))) /* * softmagic - lookup one file in parsed, in-memory copy of database * Passed the name and FILE * of one file to be typed. 
*/ /*ARGSUSED1*/ /* nbytes passed for regularity, maybe need later */ protected int file_softmagic(struct magic_set *ms, const unsigned char *buf, size_t nbytes, size_t level, int mode, int text) { struct mlist *ml; int rv, printed_something = 0, need_separator = 0; for (ml = ms->mlist[0]->next; ml != ms->mlist[0]; ml = ml->next) if ((rv = match(ms, ml->magic, ml->nmagic, buf, nbytes, 0, mode, text, 0, level, &printed_something, &need_separator, NULL)) != 0) return rv; return 0; } #define FILE_FMTDEBUG #ifdef FILE_FMTDEBUG #define F(a, b, c) file_fmtcheck((a), (b), (c), __FILE__, __LINE__) private const char * __attribute__((__format_arg__(3))) file_fmtcheck(struct magic_set *ms, const struct magic *m, const char *def, const char *file, size_t line) { const char *ptr = fmtcheck(m->desc, def); if (ptr == def) file_magerror(ms, "%s, %zu: format `%s' does not match with `%s'", file, line, m->desc, def); return ptr; } #else #define F(a, b, c) fmtcheck((b)->desc, (c)) #endif /* * Go through the whole list, stopping if you find a match. Process all * the continuations of that match before returning. * * We support multi-level continuations: * * At any time when processing a successful top-level match, there is a * current continuation level; it represents the level of the last * successfully matched continuation. * * Continuations above that level are skipped as, if we see one, it * means that the continuation that controls them - i.e, the * lower-level continuation preceding them - failed to match. * * Continuations below that level are processed as, if we see one, * it means we've finished processing or skipping higher-level * continuations under the control of a successful or unsuccessful * lower-level continuation, and are now seeing the next lower-level * continuation and should process it. The current continuation * level reverts to the level of the one we're seeing. * * Continuations at the current level are processed as, if we see * one, there's no lower-level continuation that may have failed. * * If a continuation matches, we bump the current continuation level * so that higher-level continuations are processed. */ private int match(struct magic_set *ms, struct magic *magic, uint32_t nmagic, const unsigned char *s, size_t nbytes, size_t offset, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t magindex = 0; unsigned int cont_level = 0; int returnvalv = 0, e; /* if a match is found it is set to 1*/ int firstline = 1; /* a flag to print X\n X\n- X */ int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0; if (returnval == NULL) returnval = &returnvalv; if (file_check_mem(ms, cont_level) == -1) return -1; for (magindex = 0; magindex < nmagic; magindex++) { int flush = 0; struct magic *m = &magic[magindex]; if (m->type != FILE_NAME) if ((IS_STRING(m->type) && #define FLT (STRING_BINTEST | STRING_TEXTTEST) ((text && (m->str_flags & FLT) == STRING_BINTEST) || (!text && (m->str_flags & FLT) == STRING_TEXTTEST))) || (m->flag & mode) != mode) { /* Skip sub-tests */ while (magindex + 1 < nmagic && magic[magindex + 1].cont_level != 0 && ++magindex) continue; continue; /* Skip to next top-level test*/ } ms->offset = m->offset; ms->line = m->lineno; /* if main entry matches, print it... 
*/ switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: flush = m->reln != '!'; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; switch (magiccheck(ms, m)) { case -1: return -1; case 0: flush++; break; default: flush = 0; break; } break; } if (flush) { /* * main entry didn't match, * flush its continuations */ while (magindex < nmagic - 1 && magic[magindex + 1].cont_level != 0) magindex++; continue; } if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, we'll need to print * a blank before we print something else. */ if (*m->desc) { *need_separator = 1; *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); /* and any continuations that match */ if (file_check_mem(ms, ++cont_level) == -1) return -1; while (++magindex < nmagic && magic[magindex].cont_level != 0) { m = &magic[magindex]; ms->line = m->lineno; /* for messages */ if (cont_level < m->cont_level) continue; if (cont_level > m->cont_level) { /* * We're at the end of the level * "cont_level" continuations. */ cont_level = m->cont_level; } ms->offset = m->offset; if (m->flag & OFFADD) { ms->offset += ms->c.li[cont_level - 1].off; } #ifdef ENABLE_CONDITIONALS if (m->cond == COND_ELSE || m->cond == COND_ELIF) { if (ms->c.li[cont_level].last_match == 1) continue; } #endif switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: if (m->reln != '!') continue; flush = 1; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; flush = 0; break; } switch (flush ? 1 : magiccheck(ms, m)) { case -1: return -1; case 0: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 0; #endif break; default: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 1; #endif if (m->type == FILE_CLEAR) ms->c.li[cont_level].got_match = 0; else if (ms->c.li[cont_level].got_match) { if (m->type == FILE_DEFAULT) break; } else ms->c.li[cont_level].got_match = 1; if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, * make sure that we have a separator first. */ if (*m->desc) { if (!*printed_something) { *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } } /* * This continuation matched. Print * its message, with a blank before it * if the previous item printed and * this item isn't empty. */ /* space if previous printed */ if (*need_separator && ((m->flag & NOSPACE) == 0) && *m->desc) { if (print && file_printf(ms, " ") == -1) return -1; *need_separator = 0; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); if (*m->desc) *need_separator = 1; /* * If we see any continuations * at a higher level, * process them. 
*/ if (file_check_mem(ms, ++cont_level) == -1) return -1; break; } } if (*printed_something) { firstline = 0; if (print) *returnval = 1; } if ((ms->flags & MAGIC_CONTINUE) == 0 && *printed_something) { return *returnval; /* don't keep searching */ } } return *returnval; /* This is hit if -k is set or there is no match */ } private int check_fmt(struct magic_set *ms, struct magic *m) { file_regex_t rx; int rc, rv = -1; if (strchr(m->desc, '%') == NULL) return 0; rc = file_regcomp(&rx, "%[-0-9\\.]*s", REG_EXTENDED|REG_NOSUB); if (rc) { file_regerror(&rx, rc, ms); } else { rc = file_regexec(&rx, m->desc, 0, 0, 0); rv = !rc; } file_regfree(&rx); return rv; } #ifndef HAVE_STRNDUP char * strndup(const char *, size_t); char * strndup(const char *str, size_t n) { size_t len; char *copy; for (len = 0; len < n && str[len]; len++) continue; if ((copy = malloc(len + 1)) == NULL) return NULL; (void)memcpy(copy, str, len); copy[len] = '\0'; return copy; } #endif /* HAVE_STRNDUP */ private int32_t mprint(struct magic_set *ms, struct magic *m) { uint64_t v; float vf; double vd; int64_t t = 0; char buf[128], tbuf[26]; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = file_signextend(ms, m, (uint64_t)p->b); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%d", (unsigned char)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%d"), (unsigned char) v) == -1) return -1; break; } t = ms->offset + sizeof(char); break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = file_signextend(ms, m, (uint64_t)p->h); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (unsigned short)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (unsigned short) v) == -1) return -1; break; } t = ms->offset + sizeof(short); break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: v = file_signextend(ms, m, (uint64_t)p->l); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (uint32_t) v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (uint32_t) v) == -1) return -1; break; } t = ms->offset + sizeof(int32_t); break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: v = file_signextend(ms, m, p->q); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%" INT64_T_FORMAT "u", (unsigned long long)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%" INT64_T_FORMAT "u"), (unsigned long long) v) == -1) return -1; break; } t = ms->offset + sizeof(int64_t); break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') { if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; t = ms->offset + m->vallen; } else { char *str = p->s; /* compute t before we mangle the string? 
*/ t = ms->offset + strlen(str); if (*m->value.s == '\0') str[strcspn(str, "\n")] = '\0'; if (m->str_flags & STRING_TRIM) { char *last; while (isspace((unsigned char)*str)) str++; last = str; while (*last) last++; --last; while (isspace((unsigned char)*last)) last--; *++last = '\0'; } if (file_printf(ms, F(ms, m, "%s"), str) == -1) return -1; if (m->type == FILE_PSTRING) t += file_pstring_length_size(m); } break; case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, FILE_T_WINDOWS, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: vf = p->f; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vf); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vf) == -1) return -1; break; } t = ms->offset + sizeof(float); break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: vd = p->d; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vd); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vd) == -1) return -1; break; } t = ms->offset + sizeof(double); break; case FILE_REGEX: { char *cp; int rval; cp = strndup((const char *)ms->search.s, ms->search.rm_len); if (cp == NULL) { file_oomem(ms, ms->search.rm_len); return -1; } rval = file_printf(ms, F(ms, m, "%s"), cp); free(cp); if (rval == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + ms->search.rm_len; break; } case FILE_SEARCH: if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + m->vallen; break; case FILE_DEFAULT: case FILE_CLEAR: if (file_printf(ms, "%s", m->desc) == -1) return -1; t = ms->offset; break; case FILE_INDIRECT: case FILE_USE: case FILE_NAME: t = ms->offset; break; default: file_magerror(ms, "invalid m->type (%d) in mprint()", m->type); return -1; } return (int32_t)t; } private int32_t moffset(struct magic_set *ms, struct magic *m) { switch (m->type) { case FILE_BYTE: return CAST(int32_t, (ms->offset + sizeof(char))); case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: return CAST(int32_t, (ms->offset + sizeof(short))); case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: return CAST(int32_t, (ms->offset + sizeof(int32_t))); case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: return CAST(int32_t, (ms->offset + sizeof(int64_t))); case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') 
return ms->offset + m->vallen; else { union VALUETYPE *p = &ms->ms_value; uint32_t t; if (*m->value.s == '\0') p->s[strcspn(p->s, "\n")] = '\0'; t = CAST(uint32_t, (ms->offset + strlen(p->s))); if (m->type == FILE_PSTRING) t += (uint32_t)file_pstring_length_size(m); return t; } case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: return CAST(int32_t, (ms->offset + sizeof(float))); case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: return CAST(int32_t, (ms->offset + sizeof(double))); case FILE_REGEX: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + ms->search.rm_len)); case FILE_SEARCH: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + m->vallen)); case FILE_CLEAR: case FILE_DEFAULT: case FILE_INDIRECT: return ms->offset; default: return 0; } } private int cvt_flip(int type, int flip) { if (flip == 0) return type; switch (type) { case FILE_BESHORT: return FILE_LESHORT; case FILE_BELONG: return FILE_LELONG; case FILE_BEDATE: return FILE_LEDATE; case FILE_BELDATE: return FILE_LELDATE; case FILE_BEQUAD: return FILE_LEQUAD; case FILE_BEQDATE: return FILE_LEQDATE; case FILE_BEQLDATE: return FILE_LEQLDATE; case FILE_BEQWDATE: return FILE_LEQWDATE; case FILE_LESHORT: return FILE_BESHORT; case FILE_LELONG: return FILE_BELONG; case FILE_LEDATE: return FILE_BEDATE; case FILE_LELDATE: return FILE_BELDATE; case FILE_LEQUAD: return FILE_BEQUAD; case FILE_LEQDATE: return FILE_BEQDATE; case FILE_LEQLDATE: return FILE_BEQLDATE; case FILE_LEQWDATE: return FILE_BEQWDATE; case FILE_BEFLOAT: return FILE_LEFLOAT; case FILE_LEFLOAT: return FILE_BEFLOAT; case FILE_BEDOUBLE: return FILE_LEDOUBLE; case FILE_LEDOUBLE: return FILE_BEDOUBLE; default: return type; } } #define DO_CVT(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPAND: \ p->fld &= cast m->num_mask; \ break; \ case FILE_OPOR: \ p->fld |= cast m->num_mask; \ break; \ case FILE_OPXOR: \ p->fld ^= cast m->num_mask; \ break; \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ case FILE_OPMODULO: \ p->fld %= cast m->num_mask; \ break; \ } \ if (m->mask_op & FILE_OPINVERSE) \ p->fld = ~p->fld \ private void cvt_8(union VALUETYPE *p, const struct magic *m) { DO_CVT(b, (uint8_t)); } private void cvt_16(union VALUETYPE *p, const struct magic *m) { DO_CVT(h, (uint16_t)); } private void cvt_32(union VALUETYPE *p, const struct magic *m) { DO_CVT(l, (uint32_t)); } private void cvt_64(union VALUETYPE *p, const struct magic *m) { DO_CVT(q, (uint64_t)); } #define DO_CVT2(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast 
m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ } \ private void cvt_float(union VALUETYPE *p, const struct magic *m) { DO_CVT2(f, (float)); } private void cvt_double(union VALUETYPE *p, const struct magic *m) { DO_CVT2(d, (double)); } /* * Convert the byte order of the data we are looking at * While we're here, let's apply the mask operation * (unless you have a better idea) */ private int mconvert(struct magic_set *ms, struct magic *m, int flip) { union VALUETYPE *p = &ms->ms_value; switch (cvt_flip(m->type, flip)) { case FILE_BYTE: cvt_8(p, m); return 1; case FILE_SHORT: cvt_16(p, m); return 1; case FILE_LONG: case FILE_DATE: case FILE_LDATE: cvt_32(p, m); return 1; case FILE_QUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: cvt_64(p, m); return 1; case FILE_STRING: case FILE_BESTRING16: case FILE_LESTRING16: { /* Null terminate and eat *trailing* return */ p->s[sizeof(p->s) - 1] = '\0'; return 1; } case FILE_PSTRING: { char *ptr1 = p->s, *ptr2 = ptr1 + file_pstring_length_size(m); size_t len = file_pstring_get_length(m, ptr1); if (len >= sizeof(p->s)) len = sizeof(p->s) - 1; while (len--) *ptr1++ = *ptr2++; *ptr1 = '\0'; return 1; } case FILE_BESHORT: p->h = (short)((p->hs[0]<<8)|(p->hs[1])); cvt_16(p, m); return 1; case FILE_BELONG: case FILE_BEDATE: case FILE_BELDATE: p->l = (int32_t) ((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3])); cvt_32(p, m); return 1; case FILE_BEQUAD: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7])); cvt_64(p, m); return 1; case FILE_LESHORT: p->h = (short)((p->hs[1]<<8)|(p->hs[0])); cvt_16(p, m); return 1; case FILE_LELONG: case FILE_LEDATE: case FILE_LELDATE: p->l = (int32_t) ((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0])); cvt_32(p, m); return 1; case FILE_LEQUAD: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0])); cvt_64(p, m); return 1; case FILE_MELONG: case FILE_MEDATE: case FILE_MELDATE: p->l = (int32_t) ((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2])); cvt_32(p, m); return 1; case FILE_FLOAT: cvt_float(p, m); return 1; case FILE_BEFLOAT: p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)| ((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]); cvt_float(p, m); return 1; case FILE_LEFLOAT: p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)| ((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]); cvt_float(p, m); return 1; case FILE_DOUBLE: cvt_double(p, m); return 1; case FILE_BEDOUBLE: p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]); cvt_double(p, m); return 1; case FILE_LEDOUBLE: p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]); cvt_double(p, m); return 1; case FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_CLEAR: case FILE_NAME: case FILE_USE: return 1; default: file_magerror(ms, "invalid type %d in mconvert()", 
m->type); return 0; } } private void mdebug(uint32_t offset, const char *str, size_t len) { (void) fprintf(stderr, "mget/%zu @%d: ", len, offset); file_showstr(stderr, str, len); (void) fputc('\n', stderr); (void) fputc('\n', stderr); } private int mcopy(struct magic_set *ms, union VALUETYPE *p, int type, int indir, const unsigned char *s, uint32_t offset, size_t nbytes, size_t linecnt) { /* * Note: FILE_SEARCH and FILE_REGEX do not actually copy * anything, but setup pointers into the source */ if (indir == 0) { switch (type) { case FILE_SEARCH: ms->search.s = RCAST(const char *, s) + offset; ms->search.s_len = nbytes - offset; ms->search.offset = offset; return 0; case FILE_REGEX: { const char *b; const char *c; const char *last; /* end of search region */ const char *buf; /* start of search region */ const char *end; size_t lines; if (s == NULL) { ms->search.s_len = 0; ms->search.s = NULL; return 0; } buf = RCAST(const char *, s) + offset; end = last = RCAST(const char *, s) + nbytes; /* mget() guarantees buf <= last */ for (lines = linecnt, b = buf; lines && b < end && ((b = CAST(const char *, memchr(c = b, '\n', CAST(size_t, (end - b))))) || (b = CAST(const char *, memchr(c, '\r', CAST(size_t, (end - c)))))); lines--, b++) { last = b; if (b[0] == '\r' && b[1] == '\n') b++; } if (lines) last = RCAST(const char *, s) + nbytes; ms->search.s = buf; ms->search.s_len = last - buf; ms->search.offset = offset; ms->search.rm_len = 0; return 0; } case FILE_BESTRING16: case FILE_LESTRING16: { const unsigned char *src = s + offset; const unsigned char *esrc = s + nbytes; char *dst = p->s; char *edst = &p->s[sizeof(p->s) - 1]; if (type == FILE_BESTRING16) src++; /* check that offset is within range */ if (offset >= nbytes) break; for (/*EMPTY*/; src < esrc; src += 2, dst++) { if (dst < edst) *dst = *src; else break; if (*dst == '\0') { if (type == FILE_BESTRING16 ? *(src - 1) != '\0' : *(src + 1) != '\0') *dst = ' '; } } *edst = '\0'; return 0; } case FILE_STRING: /* XXX - these two should not need */ case FILE_PSTRING: /* to copy anything, but do anyway. 
*/ default: break; } } if (offset >= nbytes) { (void)memset(p, '\0', sizeof(*p)); return 0; } if (nbytes - offset < sizeof(*p)) nbytes = nbytes - offset; else nbytes = sizeof(*p); (void)memcpy(p, s + offset, nbytes); /* * the usefulness of padding with zeroes eludes me, it * might even cause problems */ if (nbytes < sizeof(*p)) (void)memset(((char *)(void *)p) + nbytes, '\0', sizeof(*p) - nbytes); return 0; } private int mget(struct magic_set *ms, const unsigned char *s, struct magic *m, size_t nbytes, size_t o, unsigned int cont_level, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t soffset, offset = ms->offset; uint32_t count = m->str_range; uint32_t lhs; int rv, oneed_separator, in_type; char *sbuf, *rbuf; union VALUETYPE *p = &ms->ms_value; struct mlist ml; if (recursion_level >= 20) { file_error(ms, 0, "recursion nesting exceeded"); return -1; } if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o), (uint32_t)nbytes, count) == -1) return -1; if ((ms->flags & MAGIC_DEBUG) != 0) { fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, " "nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o, nbytes, count); mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } if (m->flag & INDIR) { int off = m->in_offset; if (m->in_op & FILE_OPINDIRECT) { const union VALUETYPE *q = CAST(const union VALUETYPE *, ((const void *)(s + offset + off))); switch (cvt_flip(m->in_type, flip)) { case FILE_BYTE: off = q->b; break; case FILE_SHORT: off = q->h; break; case FILE_BESHORT: off = (short)((q->hs[0]<<8)|(q->hs[1])); break; case FILE_LESHORT: off = (short)((q->hs[1]<<8)|(q->hs[0])); break; case FILE_LONG: off = q->l; break; case FILE_BELONG: case FILE_BEID3: off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)| (q->hl[2]<<8)|(q->hl[3])); break; case FILE_LEID3: case FILE_LELONG: off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)| (q->hl[1]<<8)|(q->hl[0])); break; case FILE_MELONG: off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)| (q->hl[3]<<8)|(q->hl[2])); break; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect offs=%u\n", off); } switch (in_type = cvt_flip(m->in_type, flip)) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->b & off; break; case FILE_OPOR: offset = p->b | off; break; case FILE_OPXOR: offset = p->b ^ off; break; case FILE_OPADD: offset = p->b + off; break; case FILE_OPMINUS: offset = p->b - off; break; case FILE_OPMULTIPLY: offset = p->b * off; break; case FILE_OPDIVIDE: offset = p->b / off; break; case FILE_OPMODULO: offset = p->b % off; break; } } else offset = p->b; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[0] << 8) | p->hs[1]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[1] << 8) | p->hs[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: 
offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_SHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->h & off; break; case FILE_OPOR: offset = p->h | off; break; case FILE_OPXOR: offset = p->h ^ off; break; case FILE_OPADD: offset = p->h + off; break; case FILE_OPMINUS: offset = p->h - off; break; case FILE_OPMULTIPLY: offset = p->h * off; break; case FILE_OPDIVIDE: offset = p->h / off; break; case FILE_OPMODULO: offset = p->h % off; break; } } else offset = p->h; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BELONG: case FILE_BEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[0] << 24) | (p->hl[1] << 16) | (p->hl[2] << 8) | p->hl[3]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LELONG: case FILE_LEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[3] << 24) | (p->hl[2] << 16) | (p->hl[1] << 8) | p->hl[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_MELONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[1] << 24) | (p->hl[0] << 16) | (p->hl[3] << 8) | p->hl[2]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->l & off; break; case FILE_OPOR: offset = p->l | off; break; case FILE_OPXOR: offset = p->l ^ off; break; case FILE_OPADD: offset = p->l + off; break; case FILE_OPMINUS: offset = p->l - off; break; case FILE_OPMULTIPLY: offset = p->l * off; break; case FILE_OPDIVIDE: offset = p->l / off; break; case FILE_OPMODULO: offset = p->l % off; break; } } else offset = p->l; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; default: break; } switch (in_type) { case FILE_LEID3: case FILE_BEID3: offset = ((((offset >> 0) & 
0x7f) << 0) | (((offset >> 8) & 0x7f) << 7) | (((offset >> 16) & 0x7f) << 14) | (((offset >> 24) & 0x7f) << 21)) + 10; break; default: break; } if (m->flag & INDIROFFADD) { offset += ms->c.li[cont_level-1].off; if (offset == 0) { if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect *zero* offset\n"); return 0; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect +offs=%u\n", offset); } if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1) return -1; ms->offset = offset; if ((ms->flags & MAGIC_DEBUG) != 0) { mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } } /* Verify we have enough data to match magic type */ switch (m->type) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: if (OFFSET_OOB(nbytes, offset, 4)) return 0; break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: if (OFFSET_OOB(nbytes, offset, 8)) return 0; break; case FILE_STRING: case FILE_PSTRING: case FILE_SEARCH: if (OFFSET_OOB(nbytes, offset, m->vallen)) return 0; break; case FILE_REGEX: if (nbytes < offset) return 0; break; case FILE_INDIRECT: if (offset == 0) return 0; if (nbytes < offset) return 0; sbuf = ms->o.buf; soffset = ms->offset; ms->o.buf = NULL; ms->offset = 0; rv = file_softmagic(ms, s + offset, nbytes - offset, recursion_level, BINTEST, text); if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv); rbuf = ms->o.buf; ms->o.buf = sbuf; ms->offset = soffset; if (rv == 1) { if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 && file_printf(ms, F(ms, m, "%u"), offset) == -1) { free(rbuf); return -1; } if (file_printf(ms, "%s", rbuf) == -1) { free(rbuf); return -1; } } free(rbuf); return rv; case FILE_USE: if (nbytes < offset) return 0; sbuf = m->value.s; if (*sbuf == '^') { sbuf++; flip = !flip; } if (file_magicfind(ms, sbuf, &ml) == -1) { file_error(ms, 0, "cannot find entry `%s'", sbuf); return -1; } oneed_separator = *need_separator; if (m->flag & NOSPACE) *need_separator = 0; rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o, mode, text, flip, recursion_level, printed_something, need_separator, returnval); if (rv != 1) *need_separator = oneed_separator; return rv; case FILE_NAME: if (file_printf(ms, "%s", m->desc) == -1) return -1; return 1; case FILE_DEFAULT: /* nothing to check */ case FILE_CLEAR: default: break; } if (!mconvert(ms, m, flip)) return 0; return 1; } private uint64_t file_strncmp(const char *s1, const char *s2, size_t len, uint32_t flags) { /* * Convert the source args to unsigned here so that (1) the * compare will be unsigned as it is in strncmp() and (2) so * the ctype functions will work correctly without extra * casting. */ const unsigned char *a = (const unsigned char *)s1; const unsigned char *b = (const unsigned char *)s2; uint64_t v; /* * What we want here is v = strncmp(s1, s2, len), * but ignoring any nulls. 
*/ v = 0; if (0L == flags) { /* normal string: do it fast */ while (len-- > 0) if ((v = *b++ - *a++) != '\0') break; } else { /* combine the others */ while (len-- > 0) { if ((flags & STRING_IGNORE_LOWERCASE) && islower(*a)) { if ((v = tolower(*b++) - *a++) != '\0') break; } else if ((flags & STRING_IGNORE_UPPERCASE) && isupper(*a)) { if ((v = toupper(*b++) - *a++) != '\0') break; } else if ((flags & STRING_COMPACT_WHITESPACE) && isspace(*a)) { a++; if (isspace(*b++)) { if (!isspace(*a)) while (isspace(*b)) b++; } else { v = 1; break; } } else if ((flags & STRING_COMPACT_OPTIONAL_WHITESPACE) && isspace(*a)) { a++; while (isspace(*b)) b++; } else { if ((v = *b++ - *a++) != '\0') break; } } } return v; } private uint64_t file_strncmp16(const char *a, const char *b, size_t len, uint32_t flags) { /* * XXX - The 16-bit string compare probably needs to be done * differently, especially if the flags are to be supported. * At the moment, I am unsure. */ flags = 0; return file_strncmp(a, b, len, flags); } private int magiccheck(struct magic_set *ms, struct magic *m) { uint64_t l = m->value.q; uint64_t v; float fl, fv; double dl, dv; int matched; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = p->b; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = p->h; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: v = p->l; break; case FILE_QUAD: case FILE_LEQUAD: case FILE_BEQUAD: case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: v = p->q; break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: fl = m->value.f; fv = p->f; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = fv != fl; break; case '=': matched = fv == fl; break; case '>': matched = fv > fl; break; case '<': matched = fv < fl; break; default: file_magerror(ms, "cannot happen with float: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: dl = m->value.d; dv = p->d; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = dv != dl; break; case '=': matched = dv == dl; break; case '>': matched = dv > dl; break; case '<': matched = dv < dl; break; default: file_magerror(ms, "cannot happen with double: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DEFAULT: case FILE_CLEAR: l = 0; v = 0; break; case FILE_STRING: case FILE_PSTRING: l = 0; v = file_strncmp(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_BESTRING16: case FILE_LESTRING16: l = 0; v = file_strncmp16(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */ size_t slen; size_t idx; if (ms->search.s == NULL) return 0; slen = MIN(m->vallen, sizeof(m->value.s)); l = 0; v = 0; for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) { if (slen + idx > ms->search.s_len) break; v = file_strncmp(m->value.s, ms->search.s + idx, slen, m->str_flags); if (v == 0) { /* found match */ ms->search.offset += idx; break; } } break; } case FILE_REGEX: { int rc; file_regex_t rx; if (ms->search.s == NULL) return 0; l = 0; rc = file_regcomp(&rx, m->value.s, REG_EXTENDED|REG_NEWLINE| ((m->str_flags & STRING_IGNORE_CASE) ? 
REG_ICASE : 0)); if (rc) { file_regerror(&rx, rc, ms); v = (uint64_t)-1; } else { regmatch_t pmatch[1]; #ifndef REG_STARTEND #define REG_STARTEND 0 size_t l = ms->search.s_len - 1; char c = ms->search.s[l]; ((char *)(intptr_t)ms->search.s)[l] = '\0'; #else pmatch[0].rm_so = 0; pmatch[0].rm_eo = ms->search.s_len; #endif rc = file_regexec(&rx, (const char *)ms->search.s, 1, pmatch, REG_STARTEND); #if REG_STARTEND == 0 ((char *)(intptr_t)ms->search.s)[l] = c; #endif switch (rc) { case 0: ms->search.s += (int)pmatch[0].rm_so; ms->search.offset += (size_t)pmatch[0].rm_so; ms->search.rm_len = (size_t)(pmatch[0].rm_eo - pmatch[0].rm_so); v = 0; break; case REG_NOMATCH: v = 1; break; default: file_regerror(&rx, rc, ms); v = (uint64_t)-1; break; } } file_regfree(&rx); if (v == (uint64_t)-1) return -1; break; } case FILE_INDIRECT: case FILE_USE: case FILE_NAME: return 1; default: file_magerror(ms, "invalid type %d in magiccheck()", m->type); return -1; } v = file_signextend(ms, m, v); switch (m->reln) { case 'x': if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == *any* = 1\n", (unsigned long long)v); matched = 1; break; case '!': matched = v != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u != %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '=': matched = v == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '>': if (m->flag & UNSIGNED) { matched = v > l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u > %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v > (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d > %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '<': if (m->flag & UNSIGNED) { matched = v < l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u < %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v < (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d < %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '&': matched = (v & l) == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) == %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; case '^': matched = (v & l) != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) != %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; default: file_magerror(ms, "cannot happen: invalid relation `%c'", m->reln); return -1; } return matched; } private int handle_annotation(struct magic_set *ms, struct magic *m) { if (ms->flags & MAGIC_APPLE) { if (file_printf(ms, "%.8s", m->apple) == -1) return -1; return 1; } if ((ms->flags & MAGIC_MIME_TYPE) && m->mimetype[0]) { if (file_printf(ms, "%s", m->mimetype) == -1) return -1; return 1; } return 0; } private int print_sep(struct magic_set *ms, int firstline) { if (ms->flags & MAGIC_MIME) return 0; if (firstline) return 0; /* * we found another match * put a newline and '-' to do some 
simple formatting */ return file_printf(ms, "\n- "); }
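/*
 * Illustrative note, added by the editor and not part of the original
 * softmagic.c: the OFFSET_OOB() macro defined near the top of this file,
 *
 *     #define OFFSET_OOB(n, o, i)  ((n) < (o) || (i) > ((n) - (o)))
 *
 * is the overflow-safe way of asking "do `i` bytes starting at offset `o`
 * fit inside a buffer of `n` bytes?".  The naive form ((o) + (i) > (n))
 * can wrap around in unsigned arithmetic and wrongly report a hostile
 * access as in bounds.  A minimal sketch with hypothetical values:
 *
 *     size_t   nbytes = 100;                // buffer length
 *     uint32_t offset = 10;
 *     uint32_t want   = UINT32_MAX - 5;     // hostile element size
 *
 *     // naive check: offset + want, computed in 32-bit unsigned
 *     // arithmetic, wraps to 4; 4 > 100 is false  -> OOB read missed
 *     // OFFSET_OOB(nbytes, offset, want): want > (100 - 10) is true
 *     //                                            -> access rejected
 *
 * This is why mget() evaluates OFFSET_OOB(nbytes, offset, <size>) before
 * reading each fixed-size value out of the input buffer.
 */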
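/*
 * Illustrative note, added by the editor and not part of the svm.c source
 * that follows: the MSR permission map (MSRPM) helpers defined below,
 * svm_msrpm_offset() and set_msr_interception(), follow the layout the
 * code assumes for AMD SVM: two intercept bits (read, write) per MSR, so
 * four MSRs per byte, with three 2048-byte regions covering the MSR
 * ranges that start at 0x0, 0xc0000000 and 0xc0010000.  A worked example
 * for MSR_STAR (0xc0000081 on x86), assuming the constants in the file:
 *
 *     byte offset within range = (0xc0000081 - 0xc0000000) / 4 = 32
 *     byte offset in the map   = 32 + 1 * 2048                 = 2080
 *     u32 index returned       = 2080 / 4                      = 520
 *
 *     bit_read  = 2 * (0xc0000081 & 0x0f) = 2   (within msrpm[520])
 *     bit_write = bit_read + 1            = 3
 *
 * Clearing those bits lets the guest access MSR_STAR directly; leaving
 * them set forces a #VMEXIT so KVM can emulate the access.
 */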
/* * Kernel-based Virtual Machine driver for Linux * * AMD SVM support * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include "pmu.h" #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/trace_events.h> #include <linux/slab.h> #include <asm/perf_event.h> #include <asm/tlbflush.h> #include <asm/desc.h> #include <asm/debugreg.h> #include <asm/kvm_para.h> #include <asm/virtext.h> #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id svm_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_SVM), {} }; MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 #define SVM_FEATURE_NPT (1 << 0) #define SVM_FEATURE_LBRV (1 << 1) #define SVM_FEATURE_SVML (1 << 2) #define SVM_FEATURE_NRIP (1 << 3) #define SVM_FEATURE_TSC_RATE (1 << 4) #define SVM_FEATURE_VMCB_CLEAN (1 << 5) #define SVM_FEATURE_FLUSH_ASID (1 << 6) #define SVM_FEATURE_DECODE_ASSIST (1 << 7) #define SVM_FEATURE_PAUSE_FILTER (1 << 10) #define NESTED_EXIT_HOST 0 /* Exit handled on host level */ #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */ #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */ #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) #define TSC_RATIO_RSVD 0xffffff0000000000ULL #define TSC_RATIO_MIN 0x0000000000000001ULL #define TSC_RATIO_MAX 0x000000ffffffffffULL static bool erratum_383_found __read_mostly; static const u32 host_save_user_msrs[] = { #ifdef CONFIG_X86_64 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE, MSR_FS_BASE, #endif MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) struct kvm_vcpu; struct nested_state { struct vmcb *hsave; u64 hsave_msr; u64 vm_cr_msr; u64 vmcb; /* These are the merged vectors */ u32 *msrpm; /* gpa pointers to the real vectors */ u64 vmcb_msrpm; u64 vmcb_iopm; /* A VMEXIT is required but not yet emulated */ bool exit_required; /* cache for intercepts of the guest */ u32 intercept_cr; u32 intercept_dr; u32 intercept_exceptions; u64 intercept; /* Nested Paging related state */ u64 nested_cr3; }; #define MSRPM_OFFSETS 16 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; /* * Set osvw_len to higher value when updated Revision Guides * are published and we know what the new status bits are */ static uint64_t osvw_len = 4, osvw_status; struct vcpu_svm { struct kvm_vcpu vcpu; struct vmcb *vmcb; unsigned long vmcb_pa; struct svm_cpu_data *svm_data; uint64_t asid_generation; uint64_t sysenter_esp; uint64_t sysenter_eip; u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; struct { u16 fs; u16 gs; u16 ldt; u64 gs_base; } host; u32 *msrpm; ulong nmi_iret_rip; struct nested_state nested; bool nmi_singlestep; unsigned int3_injected; unsigned long int3_rip; u32 apf_reason; /* cached guest cpuid flags for faster access */ bool nrips_enabled : 1; }; static DEFINE_PER_CPU(u64, current_tsc_ratio); #define 
TSC_RATIO_DEFAULT 0x0100000000ULL #define MSR_INVALID 0xffffffffU static const struct svm_direct_access_msrs { u32 index; /* Index of the MSR */ bool always; /* True if intercept is always on */ } direct_access_msrs[] = { { .index = MSR_STAR, .always = true }, { .index = MSR_IA32_SYSENTER_CS, .always = true }, #ifdef CONFIG_X86_64 { .index = MSR_GS_BASE, .always = true }, { .index = MSR_FS_BASE, .always = true }, { .index = MSR_KERNEL_GS_BASE, .always = true }, { .index = MSR_LSTAR, .always = true }, { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, { .index = MSR_IA32_LASTINTTOIP, .always = false }, { .index = MSR_INVALID, .always = false }, }; /* enable NPT for AMD64 and X86 with PAE */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static bool npt_enabled = true; #else static bool npt_enabled; #endif /* allow nested paging (virtualized MMU) for all guests */ static int npt = true; module_param(npt, int, S_IRUGO); /* allow nested virtualization in KVM/SVM */ static int nested = true; module_param(nested, int, S_IRUGO); static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); static void svm_flush_tlb(struct kvm_vcpu *vcpu); static void svm_complete_interrupts(struct vcpu_svm *svm); static int nested_svm_exit_handled(struct vcpu_svm *svm); static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); enum { VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, pause filter count */ VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */ VMCB_ASID, /* ASID */ VMCB_INTR, /* int_ctl, int_vector */ VMCB_NPT, /* npt_en, nCR3, gPAT */ VMCB_CR, /* CR0, CR3, CR4, EFER */ VMCB_DR, /* DR6, DR7 */ VMCB_DT, /* GDT, IDT */ VMCB_SEG, /* CS, DS, SS, ES, CPL */ VMCB_CR2, /* CR2 only */ VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */ VMCB_DIRTY_MAX, }; /* TPR and CR2 are always written before VMRUN */ #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2)) static inline void mark_all_dirty(struct vmcb *vmcb) { vmcb->control.clean = 0; } static inline void mark_all_clean(struct vmcb *vmcb) { vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1) & ~VMCB_ALWAYS_DIRTY_MASK; } static inline void mark_dirty(struct vmcb *vmcb, int bit) { vmcb->control.clean &= ~(1 << bit); } static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_svm, vcpu); } static void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h; struct nested_state *g; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); if (!is_guest_mode(&svm->vcpu)) return; c = &svm->vmcb->control; h = &svm->nested.hsave->control; g = &svm->nested; c->intercept_cr = h->intercept_cr | g->intercept_cr; c->intercept_dr = h->intercept_dr | g->intercept_dr; c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; c->intercept = h->intercept | g->intercept; } static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) { if (is_guest_mode(&svm->vcpu)) return svm->nested.hsave; else return svm->vmcb; } static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_cr |= (1U << bit); recalc_intercepts(svm); } static inline void 
clr_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_cr &= ~(1U << bit); recalc_intercepts(svm); } static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); return vmcb->control.intercept_cr & (1U << bit); } static inline void set_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ) | (1 << INTERCEPT_DR1_READ) | (1 << INTERCEPT_DR2_READ) | (1 << INTERCEPT_DR3_READ) | (1 << INTERCEPT_DR4_READ) | (1 << INTERCEPT_DR5_READ) | (1 << INTERCEPT_DR6_READ) | (1 << INTERCEPT_DR7_READ) | (1 << INTERCEPT_DR0_WRITE) | (1 << INTERCEPT_DR1_WRITE) | (1 << INTERCEPT_DR2_WRITE) | (1 << INTERCEPT_DR3_WRITE) | (1 << INTERCEPT_DR4_WRITE) | (1 << INTERCEPT_DR5_WRITE) | (1 << INTERCEPT_DR6_WRITE) | (1 << INTERCEPT_DR7_WRITE); recalc_intercepts(svm); } static inline void clr_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_dr = 0; recalc_intercepts(svm); } static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_exceptions |= (1U << bit); recalc_intercepts(svm); } static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept_exceptions &= ~(1U << bit); recalc_intercepts(svm); } static inline void set_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept |= (1ULL << bit); recalc_intercepts(svm); } static inline void clr_intercept(struct vcpu_svm *svm, int bit) { struct vmcb *vmcb = get_host_vmcb(svm); vmcb->control.intercept &= ~(1ULL << bit); recalc_intercepts(svm); } static inline void enable_gif(struct vcpu_svm *svm) { svm->vcpu.arch.hflags |= HF_GIF_MASK; } static inline void disable_gif(struct vcpu_svm *svm) { svm->vcpu.arch.hflags &= ~HF_GIF_MASK; } static inline bool gif_set(struct vcpu_svm *svm) { return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); } static unsigned long iopm_base; struct kvm_ldttss_desc { u16 limit0; u16 base0; unsigned base1:8, type:5, dpl:2, p:1; unsigned limit1:4, zero0:3, g:1, base2:8; u32 base3; u32 zero1; } __attribute__((packed)); struct svm_cpu_data { int cpu; u64 asid_generation; u32 max_asid; u32 next_asid; struct kvm_ldttss_desc *tss_desc; struct page *save_area; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); struct svm_init_data { int cpu; int r; }; static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) #define MSRS_RANGE_SIZE 2048 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) static u32 svm_msrpm_offset(u32 msr) { u32 offset; int i; for (i = 0; i < NUM_MSR_MAPS; i++) { if (msr < msrpm_ranges[i] || msr >= msrpm_ranges[i] + MSRS_IN_RANGE) continue; offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */ offset += (i * MSRS_RANGE_SIZE); /* add range offset */ /* Now we have the u8 offset - but need the u32 offset */ return offset / 4; } /* MSR not in any range */ return MSR_INVALID; } #define MAX_INST_SIZE 15 static inline void clgi(void) { asm volatile (__ex(SVM_CLGI)); } static inline void stgi(void) { asm volatile (__ex(SVM_STGI)); } static inline void invlpga(unsigned long addr, u32 asid) { asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid)); } static int get_npt_level(void) { #ifdef CONFIG_X86_64 return PT64_ROOT_LEVEL; #else return PT32E_ROOT_LEVEL; 
#endif } static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) { vcpu->arch.efer = efer; if (!npt_enabled && !(efer & EFER_LMA)) efer &= ~EFER_LME; to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); } static int is_external_interrupt(u32 info) { info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); } static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u32 ret = 0; if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { struct vcpu_svm *svm = to_svm(vcpu); if (mask == 0) svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; else svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } if (!svm->next_rip) { if (emulate_instruction(vcpu, EMULTYPE_SKIP) != EMULATE_DONE) printk(KERN_DEBUG "%s: NOP\n", __func__); return; } if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n", __func__, kvm_rip_read(vcpu), svm->next_rip); kvm_rip_write(vcpu, svm->next_rip); svm_set_interrupt_shadow(vcpu, 0); } static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) { struct vcpu_svm *svm = to_svm(vcpu); /* * If we are within a nested VM we'd better #VMEXIT and let the guest * handle the exception */ if (!reinject && nested_svm_check_exception(svm, nr, has_error_code, error_code)) return; if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); /* * For guest debugging where we have to reinject #BP if some * INT3 is guest-owned: * Emulate nRIP by moving RIP forward. Will fail if injection * raises a fault that is not intercepted. Still better than * failing in all cases. */ skip_emulated_instruction(&svm->vcpu); rip = kvm_rip_read(&svm->vcpu); svm->int3_rip = rip + svm->vmcb->save.cs.base; svm->int3_injected = rip - old_rip; } svm->vmcb->control.event_inj = nr | SVM_EVTINJ_VALID | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) | SVM_EVTINJ_TYPE_EXEPT; svm->vmcb->control.event_inj_err = error_code; } static void svm_init_erratum_383(void) { u32 low, high; int err; u64 val; if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) return; /* Use _safe variants to not break nested virtualization */ val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err); if (err) return; val |= (1ULL << 47); low = lower_32_bits(val); high = upper_32_bits(val); native_write_msr_safe(MSR_AMD64_DC_CFG, low, high); erratum_383_found = true; } static void svm_init_osvw(struct kvm_vcpu *vcpu) { /* * Guests should see errata 400 and 415 as fixed (assuming that * HLT and IO instructions are intercepted). */ vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; vcpu->arch.osvw.status = osvw_status & ~(6ULL); /* * By increasing VCPU's osvw.length to 3 we are telling the guest that * all osvw.status bits inside that length, including bit 0 (which is * reserved for erratum 298), are valid. However, if host processor's * osvw_len is 0 then osvw_status[0] carries no information. 
We need to * be conservative here and therefore we tell the guest that erratum 298 * is present (because we really don't know). */ if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) vcpu->arch.osvw.status |= 1; } static int has_svm(void) { const char *msg; if (!cpu_has_svm(&msg)) { printk(KERN_INFO "has_svm: %s\n", msg); return 0; } return 1; } static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); cpu_svm_disable(); amd_pmu_disable_virt(); } static int svm_hardware_enable(void) { struct svm_cpu_data *sd; uint64_t efer; struct desc_ptr gdt_descr; struct desc_struct *gdt; int me = raw_smp_processor_id(); rdmsrl(MSR_EFER, efer); if (efer & EFER_SVME) return -EBUSY; if (!has_svm()) { pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me); return -EINVAL; } sd = per_cpu(svm_data, me); if (!sd) { pr_err("%s: svm_data is NULL on %d\n", __func__, me); return -EINVAL; } sd->asid_generation = 1; sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; sd->next_asid = sd->max_asid + 1; native_store_gdt(&gdt_descr); gdt = (struct desc_struct *)gdt_descr.address; sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); wrmsrl(MSR_EFER, efer | EFER_SVME); wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); } /* * Get OSVW bits. * * Note that it is possible to have a system with mixed processor * revisions and therefore different OSVW bits. If bits are not the same * on different processors then choose the worst case (i.e. if erratum * is present on one processor and not on another then assume that the * erratum is present everywhere). */ if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { uint64_t len, status = 0; int err; len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); if (!err) status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &err); if (err) osvw_status = osvw_len = 0; else { if (len < osvw_len) osvw_len = len; osvw_status |= status; osvw_status &= (1ULL << osvw_len) - 1; } } else osvw_status = osvw_len = 0; svm_init_erratum_383(); amd_pmu_enable_virt(); return 0; } static void svm_cpu_uninit(int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id()); if (!sd) return; per_cpu(svm_data, raw_smp_processor_id()) = NULL; __free_page(sd->save_area); kfree(sd); } static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; sd->save_area = alloc_page(GFP_KERNEL); r = -ENOMEM; if (!sd->save_area) goto err_1; per_cpu(svm_data, cpu) = sd; return 0; err_1: kfree(sd); return r; } static bool valid_msr_intercept(u32 index) { int i; for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) if (direct_access_msrs[i].index == index) return true; return false; } static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { u8 bit_read, bit_write; unsigned long tmp; u32 offset; /* * If this warning triggers extend the direct_access_msrs list at the * beginning of the file */ WARN_ON(!valid_msr_intercept(msr)); offset = svm_msrpm_offset(msr); bit_read = 2 * (msr & 0x0f); bit_write = 2 * (msr & 0x0f) + 1; tmp = msrpm[offset]; BUG_ON(offset == MSR_INVALID); read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp); write ? 
clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp); msrpm[offset] = tmp; } static void svm_vcpu_init_msrpm(u32 *msrpm) { int i; memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { if (!direct_access_msrs[i].always) continue; set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1); } } static void add_msr_offset(u32 offset) { int i; for (i = 0; i < MSRPM_OFFSETS; ++i) { /* Offset already in list? */ if (msrpm_offsets[i] == offset) return; /* Slot used by another offset? */ if (msrpm_offsets[i] != MSR_INVALID) continue; /* Add offset to list */ msrpm_offsets[i] = offset; return; } /* * If this BUG triggers the msrpm_offsets table has an overflow. Just * increase MSRPM_OFFSETS in this case. */ BUG(); } static void init_msrpm_offsets(void) { int i; memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { u32 offset; offset = svm_msrpm_offset(direct_access_msrs[i].index); BUG_ON(offset == MSR_INVALID); add_msr_offset(offset); } } static void svm_enable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 1; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1); } static void svm_disable_lbrv(struct vcpu_svm *svm) { u32 *msrpm = svm->msrpm; svm->vmcb->control.lbr_ctl = 0; set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } static __init int svm_hardware_setup(void) { int cpu; struct page *iopm_pages; void *iopm_va; int r; iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); if (!iopm_pages) return -ENOMEM; iopm_va = page_address(iopm_pages); memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; init_msrpm_offsets(); if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) kvm_enable_efer_bits(EFER_FFXSR); if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { kvm_has_tsc_control = true; kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; kvm_tsc_scaling_ratio_frac_bits = 32; } if (nested) { printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); } for_each_possible_cpu(cpu) { r = svm_cpu_init(cpu); if (r) goto err; } if (!boot_cpu_has(X86_FEATURE_NPT)) npt_enabled = false; if (npt_enabled && !npt) { printk(KERN_INFO "kvm: Nested Paging disabled\n"); npt_enabled = false; } if (npt_enabled) { printk(KERN_INFO "kvm: Nested Paging enabled\n"); kvm_enable_tdp(); } else kvm_disable_tdp(); return 0; err: __free_pages(iopm_pages, IOPM_ALLOC_ORDER); iopm_base = 0; return r; } static __exit void svm_hardware_unsetup(void) { int cpu; for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); iopm_base = 0; } static void init_seg(struct vmcb_seg *seg) { seg->selector = 0; seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ seg->limit = 0xffff; seg->base = 0; } static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) { seg->selector = 0; seg->attrib = SVM_SELECTOR_P_MASK | type; seg->limit = 0xffff; seg->base = 0; } static 
u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); return svm->vmcb->control.tsc_offset; } static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; if (is_guest_mode(vcpu)) { g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; } else trace_kvm_write_tsc_offset(vcpu->vcpu_id, svm->vmcb->control.tsc_offset, offset); svm->vmcb->control.tsc_offset = offset + g_tsc_offset; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.tsc_offset += adjustment; if (is_guest_mode(vcpu)) svm->nested.hsave->control.tsc_offset += adjustment; else trace_kvm_write_tsc_offset(vcpu->vcpu_id, svm->vmcb->control.tsc_offset - adjustment, svm->vmcb->control.tsc_offset); mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } static void init_vmcb(struct vcpu_svm *svm) { struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; svm->vcpu.fpu_active = 1; svm->vcpu.arch.hflags = 0; set_cr_intercept(svm, INTERCEPT_CR0_READ); set_cr_intercept(svm, INTERCEPT_CR3_READ); set_cr_intercept(svm, INTERCEPT_CR4_READ); set_cr_intercept(svm, INTERCEPT_CR0_WRITE); set_cr_intercept(svm, INTERCEPT_CR3_WRITE); set_cr_intercept(svm, INTERCEPT_CR4_WRITE); set_cr_intercept(svm, INTERCEPT_CR8_WRITE); set_dr_intercepts(svm); set_exception_intercept(svm, PF_VECTOR); set_exception_intercept(svm, UD_VECTOR); set_exception_intercept(svm, MC_VECTOR); set_intercept(svm, INTERCEPT_INTR); set_intercept(svm, INTERCEPT_NMI); set_intercept(svm, INTERCEPT_SMI); set_intercept(svm, INTERCEPT_SELECTIVE_CR0); set_intercept(svm, INTERCEPT_RDPMC); set_intercept(svm, INTERCEPT_CPUID); set_intercept(svm, INTERCEPT_INVD); set_intercept(svm, INTERCEPT_HLT); set_intercept(svm, INTERCEPT_INVLPG); set_intercept(svm, INTERCEPT_INVLPGA); set_intercept(svm, INTERCEPT_IOIO_PROT); set_intercept(svm, INTERCEPT_MSR_PROT); set_intercept(svm, INTERCEPT_TASK_SWITCH); set_intercept(svm, INTERCEPT_SHUTDOWN); set_intercept(svm, INTERCEPT_VMRUN); set_intercept(svm, INTERCEPT_VMMCALL); set_intercept(svm, INTERCEPT_VMLOAD); set_intercept(svm, INTERCEPT_VMSAVE); set_intercept(svm, INTERCEPT_STGI); set_intercept(svm, INTERCEPT_CLGI); set_intercept(svm, INTERCEPT_SKINIT); set_intercept(svm, INTERCEPT_WBINVD); set_intercept(svm, INTERCEPT_MONITOR); set_intercept(svm, INTERCEPT_MWAIT); set_intercept(svm, INTERCEPT_XSETBV); control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __pa(svm->msrpm); control->int_ctl = V_INTR_MASKING_MASK; init_seg(&save->es); init_seg(&save->ss); init_seg(&save->ds); init_seg(&save->fs); init_seg(&save->gs); save->cs.selector = 0xf000; save->cs.base = 0xffff0000; /* Executable/Readable Code Segment */ save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; save->cs.limit = 0xffff; save->gdtr.limit = 0xffff; save->idtr.limit = 0xffff; init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; kvm_set_rflags(&svm->vcpu, 2); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; /* * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. * It also updates the guest-visible cr0 value. 
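 * The value passed to it below (NW | CD | ET) matches the architectural reset value of CR0 (0x60000010).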
*/ svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); kvm_mmu_reset_context(&svm->vcpu); save->cr4 = X86_CR4_PAE; /* rdx = ?? */ if (npt_enabled) { /* Setup VMCB for Nested Paging */ control->nested_ctl = 1; clr_intercept(svm, INTERCEPT_INVLPG); clr_exception_intercept(svm, PF_VECTOR); clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; save->cr3 = 0; save->cr4 = 0; } svm->asid_generation = 0; svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { control->pause_filter_count = 3000; set_intercept(svm, INTERCEPT_PAUSE); } mark_all_dirty(svm->vmcb); enable_gif(svm); } static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_svm *svm = to_svm(vcpu); u32 dummy; u32 eax = 1; if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; } init_vmcb(svm); kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); kvm_register_write(vcpu, VCPU_REGS_RDX, eax); } static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; struct page *hsave_page; struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!svm) { err = -ENOMEM; goto out; } err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; err = -ENOMEM; page = alloc_page(GFP_KERNEL); if (!page) goto uninit; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) goto free_page3; svm->nested.hsave = page_address(hsave_page); svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); svm->nested.msrpm = page_address(nested_msrpm_pages); svm_vcpu_init_msrpm(svm->nested.msrpm); svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); svm_init_osvw(&svm->vcpu); return &svm->vcpu; free_page3: __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); free_page2: __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); free_page1: __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); } static void svm_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; if (unlikely(cpu != vcpu->cpu)) { svm->asid_generation = 0; mark_all_dirty(svm->vmcb); } #ifdef CONFIG_X86_64 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); #endif savesegment(fs, svm->host.fs); savesegment(gs, svm->host.gs); svm->host.ldt = kvm_read_ldt(); for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { __this_cpu_write(current_tsc_ratio, tsc_ratio); 
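/* The cached per-cpu ratio is stale for this vCPU, so reprogram the hardware TSC scaling MSR. */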
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); } } } static void svm_vcpu_put(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int i; ++vcpu->stat.host_state_reload; kvm_load_ldt(svm->host.ldt); #ifdef CONFIG_X86_64 loadsegment(fs, svm->host.fs); wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); load_gs_index(svm->host.gs); #else #ifdef CONFIG_X86_32_LAZY_GS loadsegment(gs, svm->host.gs); #endif #endif for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) { return to_svm(vcpu)->vmcb->save.rflags; } static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { /* * Any change of EFLAGS.VM is accompanied by a reload of SS * (caused by either a task switch or an inter-privilege IRET), * so we do not need to update the CPL here. */ to_svm(vcpu)->vmcb->save.rflags = rflags; } static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { switch (reg) { case VCPU_EXREG_PDPTR: BUG_ON(!npt_enabled); load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); break; default: BUG(); } } static void svm_set_vintr(struct vcpu_svm *svm) { set_intercept(svm, INTERCEPT_VINTR); } static void svm_clear_vintr(struct vcpu_svm *svm) { clr_intercept(svm, INTERCEPT_VINTR); } static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; switch (seg) { case VCPU_SREG_CS: return &save->cs; case VCPU_SREG_DS: return &save->ds; case VCPU_SREG_ES: return &save->es; case VCPU_SREG_FS: return &save->fs; case VCPU_SREG_GS: return &save->gs; case VCPU_SREG_SS: return &save->ss; case VCPU_SREG_TR: return &save->tr; case VCPU_SREG_LDTR: return &save->ldtr; } BUG(); return NULL; } static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct vmcb_seg *s = svm_seg(vcpu, seg); return s->base; } static void svm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vmcb_seg *s = svm_seg(vcpu, seg); var->base = s->base; var->limit = s->limit; var->selector = s->selector; var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; /* * AMD CPUs circa 2014 track the G bit for all segments except CS. * However, the SVM spec states that the G bit is not observed by the * CPU, and some VMware virtual CPUs drop the G bit for all segments. * So let's synthesize a legal G bit for all segments, this helps * running KVM nested. It also helps cross-vendor migration, because * Intel's vmentry has a check on the 'G' bit. */ var->g = s->limit > 0xfffff; /* * AMD's VMCB does not have an explicit unusable field, so emulate it * for cross vendor migration purposes by "not present" */ var->unusable = !var->present || (var->type == 0); switch (seg) { case VCPU_SREG_TR: /* * Work around a bug where the busy flag in the tr selector * isn't exposed */ var->type |= 0x2; break; case VCPU_SREG_DS: case VCPU_SREG_ES: case VCPU_SREG_FS: case VCPU_SREG_GS: /* * The accessed bit must always be set in the segment * descriptor cache, although it can be cleared in the * descriptor, the cached bit always remains at 1. Since * Intel has a check on this, set it here to support * cross-vendor migration.
*/ if (!var->unusable) var->type |= 0x1; break; case VCPU_SREG_SS: /* * On AMD CPUs sometimes the DB bit in the segment * descriptor is left as 1, although the whole segment has * been made unusable. Clear it here to pass an Intel VMX * entry check when cross vendor migrating. */ if (var->unusable) var->db = 0; var->dpl = to_svm(vcpu)->vmcb->save.cpl; break; } } static int svm_get_cpl(struct kvm_vcpu *vcpu) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; return save->cpl; } static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); dt->size = svm->vmcb->save.idtr.limit; dt->address = svm->vmcb->save.idtr.base; } static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.idtr.limit = dt->size; svm->vmcb->save.idtr.base = dt->address ; mark_dirty(svm->vmcb, VMCB_DT); } static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); dt->size = svm->vmcb->save.gdtr.limit; dt->address = svm->vmcb->save.gdtr.base; } static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.gdtr.limit = dt->size; svm->vmcb->save.gdtr.base = dt->address ; mark_dirty(svm->vmcb, VMCB_DT); } static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { } static void svm_decache_cr3(struct kvm_vcpu *vcpu) { } static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { } static void update_cr0_intercept(struct vcpu_svm *svm) { ulong gcr0 = svm->vcpu.arch.cr0; u64 *hcr0 = &svm->vmcb->save.cr0; if (!svm->vcpu.fpu_active) *hcr0 |= SVM_CR0_SELECTIVE_MASK; else *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) | (gcr0 & SVM_CR0_SELECTIVE_MASK); mark_dirty(svm->vmcb, VMCB_CR); if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { clr_cr_intercept(svm, INTERCEPT_CR0_READ); clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); } else { set_cr_intercept(svm, INTERCEPT_CR0_READ); set_cr_intercept(svm, INTERCEPT_CR0_WRITE); } } static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { vcpu->arch.efer |= EFER_LMA; svm->vmcb->save.efer |= EFER_LMA | EFER_LME; } if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { vcpu->arch.efer &= ~EFER_LMA; svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); } } #endif vcpu->arch.cr0 = cr0; if (!npt_enabled) cr0 |= X86_CR0_PG | X86_CR0_WP; if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; /* * re-enable caching here because the QEMU bios * does not do it - this results in some delay at * reboot */ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); } static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; if (cr4 & X86_CR4_VMXE) return 1; if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) svm_flush_tlb(vcpu); vcpu->arch.cr4 = cr4; if (!npt_enabled) cr4 |= X86_CR4_PAE; cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); return 0; } static void svm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_seg *s = svm_seg(vcpu, seg); s->base = var->base; s->limit = var->limit; s->selector = 
var->selector; if (var->unusable) s->attrib = 0; else { s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; } /* * This is always accurate, except if SYSRET returned to a segment * with SS.DPL != 3. Intel does not have this quirk, and always * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it * would entail passing the CPL to userspace and back. */ if (seg == VCPU_SREG_SS) svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; mark_dirty(svm->vmcb, VMCB_SEG); } static void update_db_bp_intercept(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); clr_exception_intercept(svm, DB_VECTOR); clr_exception_intercept(svm, BP_VECTOR); if (svm->nmi_singlestep) set_exception_intercept(svm, DB_VECTOR); if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) set_exception_intercept(svm, DB_VECTOR); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) set_exception_intercept(svm, BP_VECTOR); } else vcpu->guest_debug = 0; } static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) { if (sd->next_asid > sd->max_asid) { ++sd->asid_generation; sd->next_asid = 1; svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; } svm->asid_generation = sd->asid_generation; svm->vmcb->control.asid = sd->next_asid++; mark_dirty(svm->vmcb, VMCB_ASID); } static u64 svm_get_dr6(struct kvm_vcpu *vcpu) { return to_svm(vcpu)->vmcb->save.dr6; } static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.dr6 = value; mark_dirty(svm->vmcb, VMCB_DR); } static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); vcpu->arch.dr6 = svm_get_dr6(vcpu); vcpu->arch.dr7 = svm->vmcb->save.dr7; vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; set_dr_intercepts(svm); } static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.dr7 = value; mark_dirty(svm->vmcb, VMCB_DR); } static int pf_interception(struct vcpu_svm *svm) { u64 fault_address = svm->vmcb->control.exit_info_2; u32 error_code; int r = 1; switch (svm->apf_reason) { default: error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, svm->vmcb->control.insn_bytes, svm->vmcb->control.insn_len); break; case KVM_PV_REASON_PAGE_NOT_PRESENT: svm->apf_reason = 0; local_irq_disable(); kvm_async_pf_task_wait(fault_address); local_irq_enable(); break; case KVM_PV_REASON_PAGE_READY: svm->apf_reason = 0; local_irq_disable(); kvm_async_pf_task_wake(fault_address); local_irq_enable(); break; } return r; } static int db_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; if (!(svm->vcpu.guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && !svm->nmi_singlestep) { 
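/* Neither the host debugger nor NMI single-stepping wants this #DB, so reflect it straight back into the guest. */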
kvm_queue_exception(&svm->vcpu, DB_VECTOR); return 1; } if (svm->nmi_singlestep) { svm->nmi_singlestep = false; if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); update_db_bp_intercept(&svm->vcpu); } if (svm->vcpu.guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; kvm_run->debug.arch.exception = DB_VECTOR; return 0; } return 1; } static int bp_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; kvm_run->debug.arch.exception = BP_VECTOR; return 0; } static int ud_interception(struct vcpu_svm *svm) { int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } static void svm_fpu_activate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); clr_exception_intercept(svm, NM_VECTOR); svm->vcpu.fpu_active = 1; update_cr0_intercept(svm); } static int nm_interception(struct vcpu_svm *svm) { svm_fpu_activate(&svm->vcpu); return 1; } static bool is_erratum_383(void) { int err, i; u64 value; if (!erratum_383_found) return false; value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); if (err) return false; /* Bit 62 may or may not be set for this mce */ value &= ~(1ULL << 62); if (value != 0xb600000000010015ULL) return false; /* Clear MCi_STATUS registers */ for (i = 0; i < 6; ++i) native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); if (!err) { u32 low, high; value &= ~(1ULL << 2); low = lower_32_bits(value); high = upper_32_bits(value); native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); } /* Flush tlb to evict multi-match entries */ __flush_tlb_all(); return true; } static void svm_handle_mce(struct vcpu_svm *svm) { if (is_erratum_383()) { /* * Erratum 383 triggered. Guest state is corrupt so kill the * guest. */ pr_err("KVM: Guest triggered AMD Erratum 383\n"); kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); return; } /* * On an #MC intercept the MCE handler is not called automatically in * the host. So do it by hand here. */ asm volatile ( "int $0x12\n"); /* not sure if we ever come back to this point */ return; } static int mc_interception(struct vcpu_svm *svm) { return 1; } static int shutdown_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; /* * VMCB is undefined after a SHUTDOWN intercept * so reinitialize it. */ clear_page(svm->vmcb); init_vmcb(svm); kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int io_interception(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? 
*/ int size, in, string; unsigned port; ++svm->vcpu.stat.io_exits; string = (io_info & SVM_IOIO_STR_MASK) != 0; in = (io_info & SVM_IOIO_TYPE_MASK) != 0; if (string || in) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; svm->next_rip = svm->vmcb->control.exit_info_2; skip_emulated_instruction(&svm->vcpu); return kvm_fast_pio_out(vcpu, size, port); } static int nmi_interception(struct vcpu_svm *svm) { return 1; } static int intr_interception(struct vcpu_svm *svm) { ++svm->vcpu.stat.irq_exits; return 1; } static int nop_on_interception(struct vcpu_svm *svm) { return 1; } static int halt_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; return kvm_emulate_halt(&svm->vcpu); } static int vmmcall_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; kvm_emulate_hypercall(&svm->vcpu); return 1; } static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); return svm->nested.nested_cr3; } static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr3 = svm->nested.nested_cr3; u64 pdpte; int ret; ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, offset_in_page(cr3) + index * 8, 8); if (ret) return 0; return pdpte; } static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; mark_dirty(svm->vmcb, VMCB_NPT); svm_flush_tlb(vcpu); } static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { /* * TODO: track the cause of the nested page fault, and * correctly fill in the high bits of exit_info_1. */ svm->vmcb->control.exit_code = SVM_EXIT_NPF; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = (1ULL << 32); svm->vmcb->control.exit_info_2 = fault->address; } svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; svm->vmcb->control.exit_info_1 |= fault->error_code; /* * The present bit is always zero for page structure faults on real * hardware. 
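 * Bit 33 of exit_info_1 (2ULL << 32) is set when the nested fault was taken while the CPU walked the guest's own page tables, so the present bit is cleared below in that case.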
*/ if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) svm->vmcb->control.exit_info_1 &= ~1; nested_svm_vmexit(svm); } static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); kvm_init_shadow_mmu(vcpu); vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; vcpu->arch.mmu.shadow_root_level = get_npt_level(); reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static int nested_svm_check_permissions(struct vcpu_svm *svm) { if (!(svm->vcpu.arch.efer & EFER_SVME) || !is_paging(&svm->vcpu)) { kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } if (svm->vmcb->save.cpl) { kvm_inject_gp(&svm->vcpu, 0); return 1; } return 0; } static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code) { int vmexit; if (!is_guest_mode(&svm->vcpu)) return 0; svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = error_code; svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; vmexit = nested_svm_intercept(svm); if (vmexit == NESTED_EXIT_DONE) svm->nested.exit_required = true; return vmexit; } /* This function returns true if it is safe to enable the irq window */ static inline bool nested_svm_intr(struct vcpu_svm *svm) { if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) return true; if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) return false; /* * if vmexit was already requested (by intercepted exception * for instance) do not overwrite it with "external interrupt" * vmexit. */ if (svm->nested.exit_required) return false; svm->vmcb->control.exit_code = SVM_EXIT_INTR; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0; if (svm->nested.intercept & 1ULL) { /* * The #vmexit can't be emulated here directly because this * code path runs with irqs and preemption disabled. A * #vmexit emulation might sleep. Only signal request for * the #vmexit here.
*/ svm->nested.exit_required = true; trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); return false; } return true; } /* This function returns true if it is safe to enable the nmi window */ static inline bool nested_svm_nmi(struct vcpu_svm *svm) { if (!is_guest_mode(&svm->vcpu)) return true; if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) return true; svm->vmcb->control.exit_code = SVM_EXIT_NMI; svm->nested.exit_required = true; return false; } static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) { struct page *page; might_sleep(); page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT); if (is_error_page(page)) goto error; *_page = page; return kmap(page); error: kvm_inject_gp(&svm->vcpu, 0); return NULL; } static void nested_svm_unmap(struct page *page) { kunmap(page); kvm_release_page_dirty(page); } static int nested_svm_intercept_ioio(struct vcpu_svm *svm) { unsigned port, size, iopm_len; u16 val, mask; u8 start_bit; u64 gpa; if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) return NESTED_EXIT_HOST; port = svm->vmcb->control.exit_info_1 >> 16; size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; gpa = svm->nested.vmcb_iopm + (port / 8); start_bit = port % 8; iopm_len = (start_bit + size > 8) ? 2 : 1; mask = (0xf >> (4 - size)) << start_bit; val = 0; if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) return NESTED_EXIT_DONE; return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; } static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) { u32 offset, msr, value; int write, mask; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return NESTED_EXIT_HOST; msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; offset = svm_msrpm_offset(msr); write = svm->vmcb->control.exit_info_1 & 1; mask = 1 << ((2 * (msr & 0xf)) + write); if (offset == MSR_INVALID) return NESTED_EXIT_DONE; /* Offset is in 32 bit units but we need it in 8 bit units */ offset *= 4; if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) return NESTED_EXIT_DONE; return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; } static int nested_svm_exit_special(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; switch (exit_code) { case SVM_EXIT_INTR: case SVM_EXIT_NMI: case SVM_EXIT_EXCP_BASE + MC_VECTOR: return NESTED_EXIT_HOST; case SVM_EXIT_NPF: /* For now we are always handling NPFs when using them */ if (npt_enabled) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + PF_VECTOR: /* When we're shadowing, trap PFs, but not async PF */ if (!npt_enabled && svm->apf_reason == 0) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + NM_VECTOR: nm_interception(svm); break; default: break; } return NESTED_EXIT_CONTINUE; } /* * If this function returns true, this #vmexit was already handled */ static int nested_svm_intercept(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; int vmexit = NESTED_EXIT_HOST; switch (exit_code) { case SVM_EXIT_MSR: vmexit = nested_svm_exit_handled_msr(svm); break; case SVM_EXIT_IOIO: vmexit = nested_svm_intercept_ioio(svm); break; case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: { u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0); if (svm->nested.intercept_cr & bit) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: { u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0); if (svm->nested.intercept_dr & bit) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_EXCP_BASE ...
SVM_EXIT_EXCP_BASE + 0x1f: { u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); if (svm->nested.intercept_exceptions & excp_bits) vmexit = NESTED_EXIT_DONE; /* async page fault always cause vmexit */ else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) && svm->apf_reason != 0) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_ERR: { vmexit = NESTED_EXIT_DONE; break; } default: { u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); if (svm->nested.intercept & exit_bits) vmexit = NESTED_EXIT_DONE; } } return vmexit; } static int nested_svm_exit_handled(struct vcpu_svm *svm) { int vmexit; vmexit = nested_svm_intercept(svm); if (vmexit == NESTED_EXIT_DONE) nested_svm_vmexit(svm); return vmexit; } static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb) { struct vmcb_control_area *dst = &dst_vmcb->control; struct vmcb_control_area *from = &from_vmcb->control; dst->intercept_cr = from->intercept_cr; dst->intercept_dr = from->intercept_dr; dst->intercept_exceptions = from->intercept_exceptions; dst->intercept = from->intercept; dst->iopm_base_pa = from->iopm_base_pa; dst->msrpm_base_pa = from->msrpm_base_pa; dst->tsc_offset = from->tsc_offset; dst->asid = from->asid; dst->tlb_ctl = from->tlb_ctl; dst->int_ctl = from->int_ctl; dst->int_vector = from->int_vector; dst->int_state = from->int_state; dst->exit_code = from->exit_code; dst->exit_code_hi = from->exit_code_hi; dst->exit_info_1 = from->exit_info_1; dst->exit_info_2 = from->exit_info_2; dst->exit_int_info = from->exit_int_info; dst->exit_int_info_err = from->exit_int_info_err; dst->nested_ctl = from->nested_ctl; dst->event_inj = from->event_inj; dst->event_inj_err = from->event_inj_err; dst->nested_cr3 = from->nested_cr3; dst->lbr_ctl = from->lbr_ctl; } static int nested_svm_vmexit(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; struct page *page; trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, vmcb->control.exit_info_1, vmcb->control.exit_info_2, vmcb->control.exit_int_info, vmcb->control.exit_int_info_err, KVM_ISA_SVM); nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); if (!nested_vmcb) return 1; /* Exit Guest-Mode */ leave_guest_mode(&svm->vcpu); svm->nested.vmcb = 0; /* Give the current vmcb to the guest */ disable_gif(svm); nested_vmcb->save.es = vmcb->save.es; nested_vmcb->save.cs = vmcb->save.cs; nested_vmcb->save.ss = vmcb->save.ss; nested_vmcb->save.ds = vmcb->save.ds; nested_vmcb->save.gdtr = vmcb->save.gdtr; nested_vmcb->save.idtr = vmcb->save.idtr; nested_vmcb->save.efer = svm->vcpu.arch.efer; nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); nested_vmcb->save.cr2 = vmcb->save.cr2; nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); nested_vmcb->save.rip = vmcb->save.rip; nested_vmcb->save.rsp = vmcb->save.rsp; nested_vmcb->save.rax = vmcb->save.rax; nested_vmcb->save.dr7 = vmcb->save.dr7; nested_vmcb->save.dr6 = vmcb->save.dr6; nested_vmcb->save.cpl = vmcb->save.cpl; nested_vmcb->control.int_ctl = vmcb->control.int_ctl; nested_vmcb->control.int_vector = vmcb->control.int_vector; nested_vmcb->control.int_state = vmcb->control.int_state; nested_vmcb->control.exit_code = vmcb->control.exit_code; nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi; nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1; nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2; 
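/* exit_int_info is copied provisionally here; it is overwritten below if a still-valid event_inj has to be reported back to the L1 guest. */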
nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info; nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err; if (svm->nrips_enabled) nested_vmcb->control.next_rip = vmcb->control.next_rip; /* * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have * to make sure that we do not lose injected events. So check event_inj * here and copy it to exit_int_info if it is valid. * Exit_int_info and event_inj can't be both valid because the case * below only happens on a VMRUN instruction intercept which has * no valid exit_int_info set. */ if (vmcb->control.event_inj & SVM_EVTINJ_VALID) { struct vmcb_control_area *nc = &nested_vmcb->control; nc->exit_int_info = vmcb->control.event_inj; nc->exit_int_info_err = vmcb->control.event_inj_err; } nested_vmcb->control.tlb_ctl = 0; nested_vmcb->control.event_inj = 0; nested_vmcb->control.event_inj_err = 0; /* We always set V_INTR_MASKING and remember the old value in hflags */ if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; /* Restore the original control entries */ copy_vmcb_control_area(vmcb, hsave); kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); svm->nested.nested_cr3 = 0; /* Restore selected save entries */ svm->vmcb->save.es = hsave->save.es; svm->vmcb->save.cs = hsave->save.cs; svm->vmcb->save.ss = hsave->save.ss; svm->vmcb->save.ds = hsave->save.ds; svm->vmcb->save.gdtr = hsave->save.gdtr; svm->vmcb->save.idtr = hsave->save.idtr; kvm_set_rflags(&svm->vcpu, hsave->save.rflags); svm_set_efer(&svm->vcpu, hsave->save.efer); svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); svm_set_cr4(&svm->vcpu, hsave->save.cr4); if (npt_enabled) { svm->vmcb->save.cr3 = hsave->save.cr3; svm->vcpu.arch.cr3 = hsave->save.cr3; } else { (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); } kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); svm->vmcb->save.dr7 = 0; svm->vmcb->save.cpl = 0; svm->vmcb->control.exit_int_info = 0; mark_all_dirty(svm->vmcb); nested_svm_unmap(page); nested_svm_uninit_mmu_context(&svm->vcpu); kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_load(&svm->vcpu); return 0; } static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { /* * This function merges the msr permission bitmaps of kvm and the * nested vmcb. 
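 * A set bit means the access is intercepted, so the merge below is a plain bitwise OR of the two maps.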
It is optimized in that it only merges the parts where * the kvm msr permission bitmap may contain zero bits */ int i; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return true; for (i = 0; i < MSRPM_OFFSETS; i++) { u32 value, p; u64 offset; if (msrpm_offsets[i] == 0xffffffff) break; p = msrpm_offsets[i]; offset = svm->nested.vmcb_msrpm + (p * 4); if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) return false; svm->nested.msrpm[p] = svm->msrpm[p] | value; } svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); return true; } static bool nested_vmcb_checks(struct vmcb *vmcb) { if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0) return false; if (vmcb->control.asid == 0) return false; if (vmcb->control.nested_ctl && !npt_enabled) return false; return true; } static bool nested_svm_vmrun(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; struct page *page; u64 vmcb_gpa; vmcb_gpa = svm->vmcb->save.rax; nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return false; if (!nested_vmcb_checks(nested_vmcb)) { nested_vmcb->control.exit_code = SVM_EXIT_ERR; nested_vmcb->control.exit_code_hi = 0; nested_vmcb->control.exit_info_1 = 0; nested_vmcb->control.exit_info_2 = 0; nested_svm_unmap(page); return false; } trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, nested_vmcb->save.rip, nested_vmcb->control.int_ctl, nested_vmcb->control.event_inj, nested_vmcb->control.nested_ctl); trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff, nested_vmcb->control.intercept_cr >> 16, nested_vmcb->control.intercept_exceptions, nested_vmcb->control.intercept); /* Clear internal status */ kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); /* * Save the old vmcb, so we don't need to pick what we save, but can * restore everything when a VMEXIT occurs */ hsave->save.es = vmcb->save.es; hsave->save.cs = vmcb->save.cs; hsave->save.ss = vmcb->save.ss; hsave->save.ds = vmcb->save.ds; hsave->save.gdtr = vmcb->save.gdtr; hsave->save.idtr = vmcb->save.idtr; hsave->save.efer = svm->vcpu.arch.efer; hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); hsave->save.cr4 = svm->vcpu.arch.cr4; hsave->save.rflags = kvm_get_rflags(&svm->vcpu); hsave->save.rip = kvm_rip_read(&svm->vcpu); hsave->save.rsp = vmcb->save.rsp; hsave->save.rax = vmcb->save.rax; if (npt_enabled) hsave->save.cr3 = vmcb->save.cr3; else hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); copy_vmcb_control_area(hsave, vmcb); if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) svm->vcpu.arch.hflags |= HF_HIF_MASK; else svm->vcpu.arch.hflags &= ~HF_HIF_MASK; if (nested_vmcb->control.nested_ctl) { kvm_mmu_unload(&svm->vcpu); svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; nested_svm_init_mmu_context(&svm->vcpu); } /* Load the nested guest state */ svm->vmcb->save.es = nested_vmcb->save.es; svm->vmcb->save.cs = nested_vmcb->save.cs; svm->vmcb->save.ss = nested_vmcb->save.ss; svm->vmcb->save.ds = nested_vmcb->save.ds; svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; svm->vmcb->save.idtr = nested_vmcb->save.idtr; kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); if (npt_enabled) { svm->vmcb->save.cr3 = nested_vmcb->save.cr3; svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; } else (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); /* Guest paging mode is 
active - reset mmu */ kvm_mmu_reset_context(&svm->vcpu); svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); /* In case we don't even reach vcpu_run, the fields are not updated */ svm->vmcb->save.rax = nested_vmcb->save.rax; svm->vmcb->save.rsp = nested_vmcb->save.rsp; svm->vmcb->save.rip = nested_vmcb->save.rip; svm->vmcb->save.dr7 = nested_vmcb->save.dr7; svm->vmcb->save.dr6 = nested_vmcb->save.dr6; svm->vmcb->save.cpl = nested_vmcb->save.cpl; svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; /* cache intercepts */ svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; svm->nested.intercept = nested_vmcb->control.intercept; svm_flush_tlb(&svm->vcpu); svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; else svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { /* We only want the cr8 intercept bits of the guest */ clr_cr_intercept(svm, INTERCEPT_CR8_READ); clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); } /* We don't want to see VMMCALLs from a nested guest */ clr_intercept(svm, INTERCEPT_VMMCALL); svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm->vmcb->control.int_state = nested_vmcb->control.int_state; svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; nested_svm_unmap(page); /* Enter Guest-Mode */ enter_guest_mode(&svm->vcpu); /* * Merge guest and host intercepts - must be called with vcpu in * guest-mode to take effect here */ recalc_intercepts(svm); svm->nested.vmcb = vmcb_gpa; enable_gif(svm); mark_all_dirty(svm->vmcb); return true; } static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) { to_vmcb->save.fs = from_vmcb->save.fs; to_vmcb->save.gs = from_vmcb->save.gs; to_vmcb->save.tr = from_vmcb->save.tr; to_vmcb->save.ldtr = from_vmcb->save.ldtr; to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base; to_vmcb->save.star = from_vmcb->save.star; to_vmcb->save.lstar = from_vmcb->save.lstar; to_vmcb->save.cstar = from_vmcb->save.cstar; to_vmcb->save.sfmask = from_vmcb->save.sfmask; to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; } static int vmload_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct page *page; if (nested_svm_check_permissions(svm)) return 1; nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); nested_svm_vmloadsave(nested_vmcb, svm->vmcb); nested_svm_unmap(page); return 1; } static int vmsave_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; struct page *page; if (nested_svm_check_permissions(svm)) return 1; nested_vmcb = 
nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); nested_svm_vmloadsave(svm->vmcb, nested_vmcb); nested_svm_unmap(page); return 1; } static int vmrun_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; /* Save rip after vmrun instruction */ kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); if (!nested_svm_vmrun(svm)) return 1; if (!nested_svm_vmrun_msrpm(svm)) goto failed; return 1; failed: svm->vmcb->control.exit_code = SVM_EXIT_ERR; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0; nested_svm_vmexit(svm); return 1; } static int stgi_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); enable_gif(svm); return 1; } static int clgi_interception(struct vcpu_svm *svm) { if (nested_svm_check_permissions(svm)) return 1; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); disable_gif(svm); /* After a CLGI no interrupts should come */ svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; mark_dirty(svm->vmcb, VMCB_INTR); return 1; } static int invlpga_interception(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); return 1; } static int skinit_interception(struct vcpu_svm *svm) { trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } static int wbinvd_interception(struct vcpu_svm *svm) { kvm_emulate_wbinvd(&svm->vcpu); return 1; } static int xsetbv_interception(struct vcpu_svm *svm) { u64 new_bv = kvm_read_edx_eax(&svm->vcpu); u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); } return 1; } static int task_switch_interception(struct vcpu_svm *svm) { u16 tss_selector; int reason; int int_type = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; uint32_t type = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; uint32_t idt_v = svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; bool has_error_code = false; u32 error_code = 0; tss_selector = (u16)svm->vmcb->control.exit_info_1; if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) reason = TASK_SWITCH_IRET; else if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) reason = TASK_SWITCH_JMP; else if (idt_v) reason = TASK_SWITCH_GATE; else reason = TASK_SWITCH_CALL; if (reason == TASK_SWITCH_GATE) { switch (type) { case SVM_EXITINTINFO_TYPE_NMI: svm->vcpu.arch.nmi_injected = false; break; case SVM_EXITINTINFO_TYPE_EXEPT: if (svm->vmcb->control.exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { has_error_code = true; error_code = (u32)svm->vmcb->control.exit_info_2; } 
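/* The exception is being delivered through the task gate itself, so drop it from the queue rather than re-injecting it on top of the emulated task switch. */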
kvm_clear_exception_queue(&svm->vcpu); break; case SVM_EXITINTINFO_TYPE_INTR: kvm_clear_interrupt_queue(&svm->vcpu); break; default: break; } } if (reason != TASK_SWITCH_GATE || int_type == SVM_EXITINTINFO_TYPE_SOFT || (int_type == SVM_EXITINTINFO_TYPE_EXEPT && (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) skip_emulated_instruction(&svm->vcpu); if (int_type != SVM_EXITINTINFO_TYPE_SOFT) int_vec = -1; if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, has_error_code, error_code) == EMULATE_FAIL) { svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; svm->vcpu.run->internal.ndata = 0; return 0; } return 1; } static int cpuid_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; kvm_emulate_cpuid(&svm->vcpu); return 1; } static int iret_interception(struct vcpu_svm *svm) { ++svm->vcpu.stat.nmi_window_exits; clr_intercept(svm, INTERCEPT_IRET); svm->vcpu.arch.hflags |= HF_IRET_MASK; svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); return 1; } static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); skip_emulated_instruction(&svm->vcpu); return 1; } static int emulate_on_interception(struct vcpu_svm *svm) { return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; } static int rdpmc_interception(struct vcpu_svm *svm) { int err; if (!static_cpu_has(X86_FEATURE_NRIPS)) return emulate_on_interception(svm); err = kvm_rdpmc(&svm->vcpu); kvm_complete_insn_gp(&svm->vcpu, err); return 1; } static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val) { unsigned long cr0 = svm->vcpu.arch.cr0; bool ret = false; u64 intercept; intercept = svm->nested.intercept; if (!is_guest_mode(&svm->vcpu) || (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))) return false; cr0 &= ~SVM_CR0_SELECTIVE_MASK; val &= ~SVM_CR0_SELECTIVE_MASK; if (cr0 ^ val) { svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); } return ret; } #define CR_VALID (1ULL << 63) static int cr_interception(struct vcpu_svm *svm) { int reg, cr; unsigned long val; int err; if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_on_interception(svm); if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) return emulate_on_interception(svm); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; else cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; err = 0; if (cr >= 16) { /* mov to cr */ cr -= 16; val = kvm_register_read(&svm->vcpu, reg); switch (cr) { case 0: if (!check_selective_cr0_intercepted(svm, val)) err = kvm_set_cr0(&svm->vcpu, val); else return 1; break; case 3: err = kvm_set_cr3(&svm->vcpu, val); break; case 4: err = kvm_set_cr4(&svm->vcpu, val); break; case 8: err = kvm_set_cr8(&svm->vcpu, val); break; default: WARN(1, "unhandled write to CR%d", cr); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } } else { /* mov from cr */ switch (cr) { case 0: val = kvm_read_cr0(&svm->vcpu); break; case 2: val = svm->vcpu.arch.cr2; break; case 3: val = kvm_read_cr3(&svm->vcpu); break; case 4: val = kvm_read_cr4(&svm->vcpu); break; case 8: val = kvm_get_cr8(&svm->vcpu); break; default: WARN(1, "unhandled read from CR%d", cr); 
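/* Reads of CRs other than 0, 2, 3, 4 and 8 are not handled: inject #UD. */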
kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } kvm_register_write(&svm->vcpu, reg, val); } kvm_complete_insn_gp(&svm->vcpu, err); return 1; } static int dr_interception(struct vcpu_svm *svm) { int reg, dr; unsigned long val; if (svm->vcpu.guest_debug == 0) { /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. */ clr_dr_intercepts(svm); svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_on_interception(svm); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; if (dr >= 16) { /* mov to DRn */ if (!kvm_require_dr(&svm->vcpu, dr - 16)) return 1; val = kvm_register_read(&svm->vcpu, reg); kvm_set_dr(&svm->vcpu, dr - 16, val); } else { if (!kvm_require_dr(&svm->vcpu, dr)) return 1; kvm_get_dr(&svm->vcpu, dr, &val); kvm_register_write(&svm->vcpu, reg, val); } skip_emulated_instruction(&svm->vcpu); return 1; } static int cr8_write_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; int r; u8 cr8_prev = kvm_get_cr8(&svm->vcpu); /* instruction emulation calls kvm_set_cr8() */ r = cr_interception(svm); if (lapic_in_kernel(&svm->vcpu)) return r; if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) return r; kvm_run->exit_reason = KVM_EXIT_SET_TPR; return 0; } static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); return vmcb->control.tsc_offset + host_tsc; } static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); switch (msr_info->index) { case MSR_IA32_TSC: { msr_info->data = svm->vmcb->control.tsc_offset + kvm_scale_tsc(vcpu, rdtsc()); break; } case MSR_STAR: msr_info->data = svm->vmcb->save.star; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: msr_info->data = svm->vmcb->save.lstar; break; case MSR_CSTAR: msr_info->data = svm->vmcb->save.cstar; break; case MSR_KERNEL_GS_BASE: msr_info->data = svm->vmcb->save.kernel_gs_base; break; case MSR_SYSCALL_MASK: msr_info->data = svm->vmcb->save.sfmask; break; #endif case MSR_IA32_SYSENTER_CS: msr_info->data = svm->vmcb->save.sysenter_cs; break; case MSR_IA32_SYSENTER_EIP: msr_info->data = svm->sysenter_eip; break; case MSR_IA32_SYSENTER_ESP: msr_info->data = svm->sysenter_esp; break; /* * Nobody will change the following 5 values in the VMCB so we can * safely return them on rdmsr. They will always be 0 until LBRV is * implemented. 
*/ case MSR_IA32_DEBUGCTLMSR: msr_info->data = svm->vmcb->save.dbgctl; break; case MSR_IA32_LASTBRANCHFROMIP: msr_info->data = svm->vmcb->save.br_from; break; case MSR_IA32_LASTBRANCHTOIP: msr_info->data = svm->vmcb->save.br_to; break; case MSR_IA32_LASTINTFROMIP: msr_info->data = svm->vmcb->save.last_excp_from; break; case MSR_IA32_LASTINTTOIP: msr_info->data = svm->vmcb->save.last_excp_to; break; case MSR_VM_HSAVE_PA: msr_info->data = svm->nested.hsave_msr; break; case MSR_VM_CR: msr_info->data = svm->nested.vm_cr_msr; break; case MSR_IA32_UCODE_REV: msr_info->data = 0x01000065; break; default: return kvm_get_msr_common(vcpu, msr_info); } return 0; } static int rdmsr_interception(struct vcpu_svm *svm) { u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); struct msr_data msr_info; msr_info.index = ecx; msr_info.host_initiated = false; if (svm_get_msr(&svm->vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(&svm->vcpu, 0); } else { trace_kvm_msr_read(ecx, msr_info.data); kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, msr_info.data & 0xffffffff); kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, msr_info.data >> 32); svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; skip_emulated_instruction(&svm->vcpu); } return 1; } static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) { struct vcpu_svm *svm = to_svm(vcpu); int svm_dis, chg_mask; if (data & ~SVM_VM_CR_VALID_MASK) return 1; chg_mask = SVM_VM_CR_VALID_MASK; if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); svm->nested.vm_cr_msr &= ~chg_mask; svm->nested.vm_cr_msr |= (data & chg_mask); svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; /* check for svm_disable while efer.svme is set */ if (svm_dis && (vcpu->arch.efer & EFER_SVME)) return 1; return 0; } static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; case MSR_STAR: svm->vmcb->save.star = data; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: svm->vmcb->save.lstar = data; break; case MSR_CSTAR: svm->vmcb->save.cstar = data; break; case MSR_KERNEL_GS_BASE: svm->vmcb->save.kernel_gs_base = data; break; case MSR_SYSCALL_MASK: svm->vmcb->save.sfmask = data; break; #endif case MSR_IA32_SYSENTER_CS: svm->vmcb->save.sysenter_cs = data; break; case MSR_IA32_SYSENTER_EIP: svm->sysenter_eip = data; svm->vmcb->save.sysenter_eip = data; break; case MSR_IA32_SYSENTER_ESP: svm->sysenter_esp = data; svm->vmcb->save.sysenter_esp = data; break; case MSR_IA32_DEBUGCTLMSR: if (!boot_cpu_has(X86_FEATURE_LBRV)) { vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", __func__, data); break; } if (data & DEBUGCTL_RESERVED_BITS) return 1; svm->vmcb->save.dbgctl = data; mark_dirty(svm->vmcb, VMCB_LBR); if (data & (1ULL<<0)) svm_enable_lbrv(svm); else svm_disable_lbrv(svm); break; case MSR_VM_HSAVE_PA: svm->nested.hsave_msr = data; break; case MSR_VM_CR: return svm_set_vm_cr(vcpu, data); case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; default: return kvm_set_msr_common(vcpu, msr); } return 0; } static int wrmsr_interception(struct vcpu_svm *svm) { struct msr_data msr; u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); u64 data = kvm_read_edx_eax(&svm->vcpu); msr.data = data; msr.index = ecx; msr.host_initiated = false; svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; if (kvm_set_msr(&svm->vcpu, &msr)) { 
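/* The write was rejected: trace it and inject #GP without advancing RIP, so the fault points at the WRMSR itself. */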
trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(&svm->vcpu, 0); } else { trace_kvm_msr_write(ecx, data); skip_emulated_instruction(&svm->vcpu); } return 1; } static int msr_interception(struct vcpu_svm *svm) { if (svm->vmcb->control.exit_info_1) return wrmsr_interception(svm); else return rdmsr_interception(svm); } static int interrupt_window_interception(struct vcpu_svm *svm) { kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; mark_dirty(svm->vmcb, VMCB_INTR); ++svm->vcpu.stat.irq_window_exits; return 1; } static int pause_interception(struct vcpu_svm *svm) { kvm_vcpu_on_spin(&(svm->vcpu)); return 1; } static int nop_interception(struct vcpu_svm *svm) { skip_emulated_instruction(&(svm->vcpu)); return 1; } static int monitor_interception(struct vcpu_svm *svm) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return nop_interception(svm); } static int mwait_interception(struct vcpu_svm *svm) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return nop_interception(svm); } static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_READ_CR0] = cr_interception, [SVM_EXIT_READ_CR3] = cr_interception, [SVM_EXIT_READ_CR4] = cr_interception, [SVM_EXIT_READ_CR8] = cr_interception, [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, [SVM_EXIT_WRITE_CR0] = cr_interception, [SVM_EXIT_WRITE_CR3] = cr_interception, [SVM_EXIT_WRITE_CR4] = cr_interception, [SVM_EXIT_WRITE_CR8] = cr8_write_interception, [SVM_EXIT_READ_DR0] = dr_interception, [SVM_EXIT_READ_DR1] = dr_interception, [SVM_EXIT_READ_DR2] = dr_interception, [SVM_EXIT_READ_DR3] = dr_interception, [SVM_EXIT_READ_DR4] = dr_interception, [SVM_EXIT_READ_DR5] = dr_interception, [SVM_EXIT_READ_DR6] = dr_interception, [SVM_EXIT_READ_DR7] = dr_interception, [SVM_EXIT_WRITE_DR0] = dr_interception, [SVM_EXIT_WRITE_DR1] = dr_interception, [SVM_EXIT_WRITE_DR2] = dr_interception, [SVM_EXIT_WRITE_DR3] = dr_interception, [SVM_EXIT_WRITE_DR4] = dr_interception, [SVM_EXIT_WRITE_DR5] = dr_interception, [SVM_EXIT_WRITE_DR6] = dr_interception, [SVM_EXIT_WRITE_DR7] = dr_interception, [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, [SVM_EXIT_INTR] = intr_interception, [SVM_EXIT_NMI] = nmi_interception, [SVM_EXIT_SMI] = nop_on_interception, [SVM_EXIT_INIT] = nop_on_interception, [SVM_EXIT_VINTR] = interrupt_window_interception, [SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, [SVM_EXIT_INVD] = emulate_on_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, [SVM_EXIT_INVLPGA] = invlpga_interception, [SVM_EXIT_IOIO] = io_interception, [SVM_EXIT_MSR] = msr_interception, [SVM_EXIT_TASK_SWITCH] = task_switch_interception, [SVM_EXIT_SHUTDOWN] = shutdown_interception, [SVM_EXIT_VMRUN] = vmrun_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = vmload_interception, [SVM_EXIT_VMSAVE] = vmsave_interception, [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = skinit_interception, [SVM_EXIT_WBINVD] = wbinvd_interception, [SVM_EXIT_MONITOR] = monitor_interception, [SVM_EXIT_MWAIT] = 
mwait_interception, [SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_NPF] = pf_interception, [SVM_EXIT_RSM] = emulate_on_interception, }; static void dump_vmcb(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; struct vmcb_save_area *save = &svm->vmcb->save; pr_err("VMCB Control Area:\n"); pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff); pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16); pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff); pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16); pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions); pr_err("%-20s%016llx\n", "intercepts:", control->intercept); pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); pr_err("%-20s%d\n", "asid:", control->asid); pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); pr_err("%-20s%08x\n", "int_vector:", control->int_vector); pr_err("%-20s%08x\n", "int_state:", control->int_state); pr_err("%-20s%08x\n", "exit_code:", control->exit_code); pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); pr_err("%-20s%08x\n", "event_inj:", control->event_inj); pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl); pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); pr_err("VMCB State Save Area:\n"); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "es:", save->es.selector, save->es.attrib, save->es.limit, save->es.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "cs:", save->cs.selector, save->cs.attrib, save->cs.limit, save->cs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ss:", save->ss.selector, save->ss.attrib, save->ss.limit, save->ss.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ds:", save->ds.selector, save->ds.attrib, save->ds.limit, save->ds.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "fs:", save->fs.selector, save->fs.attrib, save->fs.limit, save->fs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gs:", save->gs.selector, save->gs.attrib, save->gs.limit, save->gs.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "gdtr:", save->gdtr.selector, save->gdtr.attrib, save->gdtr.limit, save->gdtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "ldtr:", save->ldtr.selector, save->ldtr.attrib, save->ldtr.limit, save->ldtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "idtr:", save->idtr.selector, save->idtr.attrib, save->idtr.limit, save->idtr.base); pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", "tr:", save->tr.selector, save->tr.attrib, save->tr.limit, save->tr.base); pr_err("cpl: %d efer: %016llx\n", save->cpl, save->efer); pr_err("%-15s %016llx %-13s %016llx\n", "cr0:", save->cr0, "cr2:", save->cr2); pr_err("%-15s %016llx %-13s %016llx\n", "cr3:", save->cr3, "cr4:", save->cr4); pr_err("%-15s %016llx %-13s %016llx\n", 
"dr6:", save->dr6, "dr7:", save->dr7); pr_err("%-15s %016llx %-13s %016llx\n", "rip:", save->rip, "rflags:", save->rflags); pr_err("%-15s %016llx %-13s %016llx\n", "rsp:", save->rsp, "rax:", save->rax); pr_err("%-15s %016llx %-13s %016llx\n", "star:", save->star, "lstar:", save->lstar); pr_err("%-15s %016llx %-13s %016llx\n", "cstar:", save->cstar, "sfmask:", save->sfmask); pr_err("%-15s %016llx %-13s %016llx\n", "kernel_gs_base:", save->kernel_gs_base, "sysenter_cs:", save->sysenter_cs); pr_err("%-15s %016llx %-13s %016llx\n", "sysenter_esp:", save->sysenter_esp, "sysenter_eip:", save->sysenter_eip); pr_err("%-15s %016llx %-13s %016llx\n", "gpat:", save->g_pat, "dbgctl:", save->dbgctl); pr_err("%-15s %016llx %-13s %016llx\n", "br_from:", save->br_from, "br_to:", save->br_to); pr_err("%-15s %016llx %-13s %016llx\n", "excp_from:", save->last_excp_from, "excp_to:", save->last_excp_to); } static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; *info1 = control->exit_info_1; *info2 = control->exit_info_2; } static int handle_exit(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 exit_code = svm->vmcb->control.exit_code; if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) vcpu->arch.cr3 = svm->vmcb->save.cr3; if (unlikely(svm->nested.exit_required)) { nested_svm_vmexit(svm); svm->nested.exit_required = false; return 1; } if (is_guest_mode(vcpu)) { int vmexit; trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, svm->vmcb->control.exit_info_1, svm->vmcb->control.exit_info_2, svm->vmcb->control.exit_int_info, svm->vmcb->control.exit_int_info_err, KVM_ISA_SVM); vmexit = nested_svm_exit_special(svm); if (vmexit == NESTED_EXIT_CONTINUE) vmexit = nested_svm_exit_handled(svm); if (vmexit == NESTED_EXIT_DONE) return 1; } svm_complete_interrupts(svm); if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason = svm->vmcb->control.exit_code; pr_err("KVM: FAILED VMRUN WITH VMCB:\n"); dump_vmcb(vcpu); return 0; } if (is_external_interrupt(svm->vmcb->control.exit_int_info) && exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " "exit_code 0x%x\n", __func__, svm->vmcb->control.exit_int_info, exit_code); if (exit_code >= ARRAY_SIZE(svm_exit_handlers) || !svm_exit_handlers[exit_code]) { WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return svm_exit_handlers[exit_code](svm); } static void reload_tss(struct kvm_vcpu *vcpu) { int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); sd->tss_desc->type = 9; /* available 32/64-bit TSS */ load_TR_desc(); } static void pre_svm_run(struct vcpu_svm *svm) { int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); /* FIXME: handle wraparound of asid_generation */ if (svm->asid_generation != sd->asid_generation) new_asid(svm, sd); } static void svm_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; vcpu->arch.hflags |= HF_NMI_MASK; set_intercept(svm, INTERCEPT_IRET); ++vcpu->stat.nmi_injections; } static inline void 
svm_inject_irq(struct vcpu_svm *svm, int irq) { struct vmcb_control_area *control; control = &svm->vmcb->control; control->int_vector = irq; control->int_ctl &= ~V_INTR_PRIO_MASK; control->int_ctl |= V_IRQ_MASK | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); mark_dirty(svm->vmcb, VMCB_INTR); } static void svm_set_irq(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); BUG_ON(!(gif_set(svm))); trace_kvm_inj_virq(vcpu->arch.interrupt.nr); ++vcpu->stat.irq_injections; svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); if (irr == -1) return; if (tpr >= irr) set_cr_intercept(svm, INTERCEPT_CR8_WRITE); } static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { return; } static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu) { return 0; } static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu) { return; } static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) { return; } static int svm_nmi_allowed(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb *vmcb = svm->vmcb; int ret; ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) && !(svm->vcpu.arch.hflags & HF_NMI_MASK); ret = ret && gif_set(svm) && nested_svm_nmi(svm); return ret; } static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); } static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_svm *svm = to_svm(vcpu); if (masked) { svm->vcpu.arch.hflags |= HF_NMI_MASK; set_intercept(svm, INTERCEPT_IRET); } else { svm->vcpu.arch.hflags &= ~HF_NMI_MASK; clr_intercept(svm, INTERCEPT_IRET); } } static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb *vmcb = svm->vmcb; int ret; if (!gif_set(svm) || (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)) return 0; ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF); if (is_guest_mode(vcpu)) return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); return ret; } static void enable_irq_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); /* * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes * 1, because that's a separate STGI/VMRUN intercept. The next time we * get that intercept, this function will be called again though and * we'll get the vintr intercept. */ if (gif_set(svm) && nested_svm_intr(svm)) { svm_set_vintr(svm); svm_inject_irq(svm, 0x0); } } static void enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK) return; /* IRET will cause a vm exit */ /* * Something prevents NMI from been injected. 
Single step over possible * problem (IRET or exception injection or interrupt shadow) */ svm->nmi_singlestep = true; svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); update_db_bp_intercept(vcpu); } static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) { return 0; } static void svm_flush_tlb(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; else svm->asid_generation--; } static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { } static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; kvm_set_cr8(vcpu, cr8); } } static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; cr8 = kvm_get_cr8(vcpu); svm->vmcb->control.int_ctl &= ~V_TPR_MASK; svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; } static void svm_complete_interrupts(struct vcpu_svm *svm) { u8 vector; int type; u32 exitintinfo = svm->vmcb->control.exit_int_info; unsigned int3_injected = svm->int3_injected; svm->int3_injected = 0; /* * If we've made progress since setting HF_IRET_MASK, we've * executed an IRET and can allow NMI injection. */ if ((svm->vcpu.arch.hflags & HF_IRET_MASK) && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); } svm->vcpu.arch.nmi_injected = false; kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); if (!(exitintinfo & SVM_EXITINTINFO_VALID)) return; kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; switch (type) { case SVM_EXITINTINFO_TYPE_NMI: svm->vcpu.arch.nmi_injected = true; break; case SVM_EXITINTINFO_TYPE_EXEPT: /* * In case of software exceptions, do not reinject the vector, * but re-execute the instruction instead. Rewind RIP first * if we emulated INT3 before. */ if (kvm_exception_is_soft(vector)) { if (vector == BP_VECTOR && int3_injected && kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) - int3_injected); break; } if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { u32 err = svm->vmcb->control.exit_int_info_err; kvm_requeue_exception_e(&svm->vcpu, vector, err); } else kvm_requeue_exception(&svm->vcpu, vector); break; case SVM_EXITINTINFO_TYPE_INTR: kvm_queue_interrupt(&svm->vcpu, vector, false); break; default: break; } } static void svm_cancel_injection(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; control->exit_int_info = control->event_inj; control->exit_int_info_err = control->event_inj_err; control->event_inj = 0; svm_complete_interrupts(svm); } static void svm_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; /* * A vmexit emulation is required before the vcpu can be executed * again. 
*/ if (unlikely(svm->nested.exit_required)) return; pre_svm_run(svm); sync_lapic_to_cr8(vcpu); svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); local_irq_enable(); asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t" "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t" "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t" "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t" "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%[svm]), %%r8 \n\t" "mov %c[r9](%[svm]), %%r9 \n\t" "mov %c[r10](%[svm]), %%r10 \n\t" "mov %c[r11](%[svm]), %%r11 \n\t" "mov %c[r12](%[svm]), %%r12 \n\t" "mov %c[r13](%[svm]), %%r13 \n\t" "mov %c[r14](%[svm]), %%r14 \n\t" "mov %c[r15](%[svm]), %%r15 \n\t" #endif /* Enter guest mode */ "push %%" _ASM_AX " \n\t" "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t" __ex(SVM_VMLOAD) "\n\t" __ex(SVM_VMRUN) "\n\t" __ex(SVM_VMSAVE) "\n\t" "pop %%" _ASM_AX " \n\t" /* Save guest registers, load host registers */ "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t" "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t" "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t" "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t" "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t" "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%[svm]) \n\t" "mov %%r9, %c[r9](%[svm]) \n\t" "mov %%r10, %c[r10](%[svm]) \n\t" "mov %%r11, %c[r11](%[svm]) \n\t" "mov %%r12, %c[r12](%[svm]) \n\t" "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" #endif "pop %%" _ASM_BP : : [svm]"a"(svm), [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP])) #ifdef CONFIG_X86_64 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) #endif : "cc", "memory" #ifdef CONFIG_X86_64 , "rbx", "rcx", "rdx", "rsi", "rdi" , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15" #else , "ebx", "ecx", "edx", "esi", "edi" #endif ); #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else loadsegment(fs, svm->host.fs); #ifndef CONFIG_X86_32_LAZY_GS loadsegment(gs, svm->host.gs); #endif #endif reload_tss(vcpu); local_irq_disable(); vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_handle_nmi(&svm->vcpu); stgi(); /* Any pending NMI will happen here */ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_after_handle_nmi(&svm->vcpu); sync_cr8_to_lapic(vcpu); svm->next_rip = 0; 
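	/*
	 * Any TLB flush requested via tlb_ctl was consumed by this VMRUN;
	 * reset it so the next entry does not flush again unless
	 * svm_flush_tlb() asks for it.
	 */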
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; /* if exit due to PF check for async PF */ if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) svm->apf_reason = kvm_read_and_reset_pf_reason(); if (npt_enabled) { vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); } /* * We need to handle MC intercepts here before the vcpu has a chance to * change the physical cpu */ if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + MC_VECTOR)) svm_handle_mce(svm); mark_all_clean(svm->vmcb); } static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->save.cr3 = root; mark_dirty(svm->vmcb, VMCB_CR); svm_flush_tlb(vcpu); } static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) { struct vcpu_svm *svm = to_svm(vcpu); svm->vmcb->control.nested_cr3 = root; mark_dirty(svm->vmcb, VMCB_NPT); /* Also sync guest cr3 here in case we live migrate */ svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); mark_dirty(svm->vmcb, VMCB_CR); svm_flush_tlb(vcpu); } static int is_disabled(void) { u64 vm_cr; rdmsrl(MSR_VM_CR, vm_cr); if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) return 1; return 0; } static void svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xd9; } static void svm_check_processor_compat(void *rtn) { *(int *)rtn = 0; } static bool svm_cpu_has_accelerated_tpr(void) { return false; } static bool svm_has_high_real_mode_segbase(void) { return true; } static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { return 0; } static void svm_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); /* Update nrips enabled cache */ svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu); } static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { switch (func) { case 0x80000001: if (nested) entry->ecx |= (1 << 2); /* Set SVM bit */ break; case 0x8000000A: entry->eax = 1; /* SVM revision 1 */ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper ASID emulation to nested SVM */ entry->ecx = 0; /* Reserved */ entry->edx = 0; /* Per default do not support any additional features */ /* Support next_rip if host supports it */ if (boot_cpu_has(X86_FEATURE_NRIPS)) entry->edx |= SVM_FEATURE_NRIP; /* Support NPT for the guest if enabled */ if (npt_enabled) entry->edx |= SVM_FEATURE_NPT; break; } } static int svm_get_lpage_level(void) { return PT_PDPE_LEVEL; } static bool svm_rdtscp_supported(void) { return false; } static bool svm_invpcid_supported(void) { return false; } static bool svm_mpx_supported(void) { return false; } static bool svm_xsaves_supported(void) { return false; } static bool svm_has_wbinvd_exit(void) { return true; } static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); set_exception_intercept(svm, NM_VECTOR); update_cr0_intercept(svm); } #define PRE_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_PRE_EXCEPT, } #define POST_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_POST_EXCEPT, } #define POST_MEM(exit) { .exit_code = (exit), \ .stage = X86_ICPT_POST_MEMACCESS, } static const struct __x86_intercept { u32 exit_code; enum x86_intercept_stage stage; } x86_intercept_map[] = { [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), 
[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), }; #undef PRE_EX #undef POST_EX #undef POST_MEM static int svm_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { struct vcpu_svm *svm = to_svm(vcpu); int vmexit, ret = X86EMUL_CONTINUE; struct __x86_intercept icpt_info; struct vmcb *vmcb = svm->vmcb; if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) goto out; icpt_info = x86_intercept_map[info->intercept]; if (stage != icpt_info.stage) goto out; switch (icpt_info.exit_code) { case SVM_EXIT_READ_CR0: if (info->intercept == x86_intercept_cr_read) icpt_info.exit_code += info->modrm_reg; break; case SVM_EXIT_WRITE_CR0: { unsigned long cr0, val; u64 intercept; if (info->intercept == x86_intercept_cr_write) icpt_info.exit_code += info->modrm_reg; if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || info->intercept == x86_intercept_clts) break; intercept = svm->nested.intercept; if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))) break; cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; if (info->intercept == x86_intercept_lmsw) { cr0 &= 0xfUL; val &= 0xfUL; /* lmsw can't clear PE - catch this here */ if (cr0 & X86_CR0_PE) val |= X86_CR0_PE; } if (cr0 ^ val) icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; break; } case SVM_EXIT_READ_DR0: case SVM_EXIT_WRITE_DR0: icpt_info.exit_code += info->modrm_reg; break; case SVM_EXIT_MSR: if (info->intercept == 
x86_intercept_wrmsr) vmcb->control.exit_info_1 = 1; else vmcb->control.exit_info_1 = 0; break; case SVM_EXIT_PAUSE: /* * We get this for NOP only, but pause * is rep not, check this here */ if (info->rep_prefix != REPE_PREFIX) goto out; case SVM_EXIT_IOIO: { u64 exit_info; u32 bytes; if (info->intercept == x86_intercept_in || info->intercept == x86_intercept_ins) { exit_info = ((info->src_val & 0xffff) << 16) | SVM_IOIO_TYPE_MASK; bytes = info->dst_bytes; } else { exit_info = (info->dst_val & 0xffff) << 16; bytes = info->src_bytes; } if (info->intercept == x86_intercept_outs || info->intercept == x86_intercept_ins) exit_info |= SVM_IOIO_STR_MASK; if (info->rep_prefix) exit_info |= SVM_IOIO_REP_MASK; bytes = min(bytes, 4u); exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); vmcb->control.exit_info_1 = exit_info; vmcb->control.exit_info_2 = info->next_rip; break; } default: break; } /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ if (static_cpu_has(X86_FEATURE_NRIPS)) vmcb->control.next_rip = info->next_rip; vmcb->control.exit_code = icpt_info.exit_code; vmexit = nested_svm_exit_handled(svm); ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED : X86EMUL_CONTINUE; out: return ret; } static void svm_handle_external_intr(struct kvm_vcpu *vcpu) { local_irq_enable(); } static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) { } static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, .hardware_unsetup = svm_hardware_unsetup, .check_processor_compatibility = svm_check_processor_compat, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, .vcpu_create = svm_create_vcpu, .vcpu_free = svm_free_vcpu, .vcpu_reset = svm_vcpu_reset, .prepare_guest_switch = svm_prepare_guest_switch, .vcpu_load = svm_vcpu_load, .vcpu_put = svm_vcpu_put, .update_db_bp_intercept = update_db_bp_intercept, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, .get_segment = svm_get_segment, .set_segment = svm_set_segment, .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, .decache_cr3 = svm_decache_cr3, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, .set_cr4 = svm_set_cr4, .set_efer = svm_set_efer, .get_idt = svm_get_idt, .set_idt = svm_set_idt, .get_gdt = svm_get_gdt, .set_gdt = svm_set_gdt, .get_dr6 = svm_get_dr6, .set_dr6 = svm_set_dr6, .set_dr7 = svm_set_dr7, .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, .cache_reg = svm_cache_reg, .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, .fpu_activate = svm_fpu_activate, .fpu_deactivate = svm_fpu_deactivate, .tlb_flush = svm_flush_tlb, .run = svm_vcpu_run, .handle_exit = handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = svm_set_interrupt_shadow, .get_interrupt_shadow = svm_get_interrupt_shadow, .patch_hypercall = svm_patch_hypercall, .set_irq = svm_set_irq, .set_nmi = svm_inject_nmi, .queue_exception = svm_queue_exception, .cancel_injection = svm_cancel_injection, .interrupt_allowed = svm_interrupt_allowed, .nmi_allowed = svm_nmi_allowed, .get_nmi_mask = svm_get_nmi_mask, .set_nmi_mask = svm_set_nmi_mask, .enable_nmi_window = enable_nmi_window, 
.enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, .cpu_uses_apicv = svm_cpu_uses_apicv, .load_eoi_exitmap = svm_load_eoi_exitmap, .sync_pir_to_irr = svm_sync_pir_to_irr, .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, .get_mt_mask = svm_get_mt_mask, .get_exit_info = svm_get_exit_info, .get_lpage_level = svm_get_lpage_level, .cpuid_update = svm_cpuid_update, .rdtscp_supported = svm_rdtscp_supported, .invpcid_supported = svm_invpcid_supported, .mpx_supported = svm_mpx_supported, .xsaves_supported = svm_xsaves_supported, .set_supported_cpuid = svm_set_supported_cpuid, .has_wbinvd_exit = svm_has_wbinvd_exit, .read_tsc_offset = svm_read_tsc_offset, .write_tsc_offset = svm_write_tsc_offset, .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest, .read_l1_tsc = svm_read_l1_tsc, .set_tdp_cr3 = set_tdp_cr3, .check_intercept = svm_check_intercept, .handle_external_intr = svm_handle_external_intr, .sched_in = svm_sched_in, .pmu_ops = &amd_pmu_ops, }; static int __init svm_init(void) { return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm), THIS_MODULE); } static void __exit svm_exit(void) { kvm_exit(); } module_init(svm_init) module_exit(svm_exit)
/* * Copyright (c) 2008-2011, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Author: Lucy Liu <lucy.liu@intel.com> */ #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/slab.h> #include <net/netlink.h> #include <net/rtnetlink.h> #include <linux/dcbnl.h> #include <net/dcbevent.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <net/sock.h> /* Data Center Bridging (DCB) is a collection of Ethernet enhancements * intended to allow network traffic with differing requirements * (highly reliable, no drops vs. best effort vs. low latency) to operate * and co-exist on Ethernet. Current DCB features are: * * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a * framework for assigning bandwidth guarantees to traffic classes. * * Priority-based Flow Control (PFC) - provides a flow control mechanism which * can work independently for each 802.1p priority. * * Congestion Notification - provides a mechanism for end-to-end congestion * control for protocols which do not have built-in congestion management. * * More information about the emerging standards for these Ethernet features * can be found at: http://www.ieee802.org/1/pages/dcbridges.html * * This file implements an rtnetlink interface to allow configuration of DCB * features for capable devices. 
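 *
 * Userspace agents (for example the open-lldp/lldpad tools) drive this
 * interface with RTM_GETDCB and RTM_SETDCB rtnetlink messages carrying the
 * nested attribute sets defined below.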
*/ MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>"); MODULE_DESCRIPTION("Data Center Bridging netlink interface"); MODULE_LICENSE("GPL"); /**************** DCB attribute policies *************************************/ /* DCB netlink attributes policy */ static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, [DCB_ATTR_STATE] = {.type = NLA_U8}, [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_SET_ALL] = {.type = NLA_U8}, [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG}, [DCB_ATTR_CAP] = {.type = NLA_NESTED}, [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, [DCB_ATTR_BCN] = {.type = NLA_NESTED}, [DCB_ATTR_APP] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE] = {.type = NLA_NESTED}, [DCB_ATTR_DCBX] = {.type = NLA_U8}, [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED}, }; /* DCB priority flow control to User Priority nested attributes */ static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB priority grouping nested attributes */ static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED}, [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG}, }; /* DCB traffic class nested attributes. */ static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8}, [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_GSP] = {.type = NLA_U8}, [DCB_CAP_ATTR_BCN] = {.type = NLA_U8}, [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, }; /* DCB BCN nested attributes. 
*/ static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG}, [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32}, [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32}, [DCB_BCN_ATTR_BETA] = {.type = NLA_U32}, [DCB_BCN_ATTR_GD] = {.type = NLA_U32}, [DCB_BCN_ATTR_GI] = {.type = NLA_U32}, [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32}, [DCB_BCN_ATTR_TD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32}, [DCB_BCN_ATTR_W] = {.type = NLA_U32}, [DCB_BCN_ATTR_RD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RU] = {.type = NLA_U32}, [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32}, [DCB_BCN_ATTR_RI] = {.type = NLA_U32}, [DCB_BCN_ATTR_C] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB APP nested attributes. */ static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, [DCB_APP_ATTR_ID] = {.type = NLA_U16}, [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, }; /* IEEE 802.1Qaz nested attributes. */ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, }; static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)}, }; /* DCB number of traffic classes nested attributes. 
*/ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, }; static LIST_HEAD(dcb_app_list); static DEFINE_SPINLOCK(dcb_lock); static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq, u32 flags, struct nlmsghdr **nlhp) { struct sk_buff *skb; struct dcbmsg *dcb; struct nlmsghdr *nlh; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return NULL; nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags); BUG_ON(!nlh); dcb = nlmsg_data(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; dcb->dcb_pad = 0; if (nlhp) *nlhp = nlh; return skb; } static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->getstate(netdev)); } static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest); if (ret) return ret; nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG); if (!nest) return -EMSGSIZE; if (data[DCB_PFC_UP_ATTR_ALL]) getall = 1; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (!getall && !data[i]) continue; netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); } static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_CAP]) return -EINVAL; if (!netdev->dcbnl_ops->getcap) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_cap_nest); if (ret) return ret; nest = nla_nest_start(skb, DCB_ATTR_CAP); if (!nest) return -EMSGSIZE; if (data[DCB_CAP_ATTR_ALL]) getall = 1; for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { if (!getall && !data[i]) continue; if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->getnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest); if (ret) return ret; nest = 
nla_nest_start(skb, DCB_ATTR_NUMTCS); if (!nest) return -EMSGSIZE; if (data[DCB_NUMTCS_ATTR_ALL]) getall = 1; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); if (!ret) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } else return -EINVAL; } nla_nest_end(skb, nest); return 0; } static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; int ret; u8 value; int i; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->setnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest); if (ret) return ret; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); if (ret) break; } return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret); } static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getpfcstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_PFC_STATE, netdev->dcbnl_ops->getpfcstate(netdev)); } static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_PFC_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setpfcstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); netdev->dcbnl_ops->setpfcstate(netdev, value); return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0); } static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *app_nest; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; u16 id; u8 up, idtype; int ret; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); if (netdev->dcbnl_ops->getapp) { up = netdev->dcbnl_ops->getapp(netdev, idtype, id); } else { struct dcb_app app = { .selector = idtype, .protocol = id, }; up = dcb_getapp(netdev, &app); } app_nest = nla_nest_start(skb, DCB_ATTR_APP); if (!app_nest) return -EMSGSIZE; ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id); if (ret) goto out_cancel; ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up); if (ret) goto out_cancel; nla_nest_end(skb, app_nest); return 0; out_cancel: nla_nest_cancel(skb, app_nest); return ret; } static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; u16 id; u8 up, idtype; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID]) || (!app_tb[DCB_APP_ATTR_PRIORITY])) return -EINVAL; /* either by eth type or 
by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); if (netdev->dcbnl_ops->setapp) { ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); } else { struct dcb_app app; app.selector = idtype; app.protocol = id; app.priority = up; ret = dcb_setapp(netdev, &app); } ret = nla_put_u8(skb, DCB_ATTR_APP, ret); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); return ret; } static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_nest, *param_nest, *data; struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; u8 prio, pgid, tc_pct, up_map; int ret; int getall = 0; int i; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpgtccfgtx || !netdev->dcbnl_ops->getpgtccfgrx || !netdev->dcbnl_ops->getpgbwgcfgtx || !netdev->dcbnl_ops->getpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); if (ret) return ret; pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG); if (!pg_nest) return -EMSGSIZE; if (pg_tb[DCB_PG_ATTR_TC_ALL]) getall = 1; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!getall && !pg_tb[i]) continue; if (pg_tb[DCB_PG_ATTR_TC_ALL]) data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data, dcbnl_tc_param_nest); if (ret) goto err_pg; param_nest = nla_nest_start(skb, i); if (!param_nest) goto err_pg; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } else { /* Tx */ netdev->dcbnl_ops->getpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } if (param_tb[DCB_TC_ATTR_PARAM_PGID] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); if (ret) goto err_param; } nla_nest_end(skb, param_nest); } if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) getall = 1; else getall = 0; for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!getall && !pg_tb[i]) continue; tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } else { /* Tx */ netdev->dcbnl_ops->getpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } ret = nla_put_u8(skb, i, tc_pct); if (ret) goto err_pg; } nla_nest_end(skb, pg_nest); return 0; err_param: nla_nest_cancel(skb, param_nest); err_pg: nla_nest_cancel(skb, pg_nest); return -EMSGSIZE; } static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); } static int 
dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); } static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_STATE]); return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->setstate(netdev, value)); } static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; int i; int ret; u8 value; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest); if (ret) return ret; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); netdev->dcbnl_ops->setpfccfg(netdev, data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); } return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0); } static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; if (!tb[DCB_ATTR_SET_ALL]) return -EINVAL; if (!netdev->dcbnl_ops->setall) return -EOPNOTSUPP; ret = nla_put_u8(skb, DCB_ATTR_SET_ALL, netdev->dcbnl_ops->setall(netdev)); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; int ret; int i; u8 pgid; u8 up_map; u8 prio; u8 tc_pct; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpgtccfgtx || !netdev->dcbnl_ops->setpgtccfgrx || !netdev->dcbnl_ops->setpgbwgcfgtx || !netdev->dcbnl_ops->setpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); if (ret) return ret; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!pg_tb[i]) continue; ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, pg_tb[i], dcbnl_tc_param_nest); if (ret) return ret; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) prio = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); if (param_tb[DCB_TC_ATTR_PARAM_PGID]) pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) up_map = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } else { /* Tx */ netdev->dcbnl_ops->setpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!pg_tb[i]) continue; tc_pct = nla_get_u8(pg_tb[i]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } else { /* Tx */ netdev->dcbnl_ops->setpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } } return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0); } static int 
dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); } static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); } static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *bcn_nest; struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; u8 value_byte; u32 value_integer; int ret; bool getall = false; int i; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->getbcnrp || !netdev->dcbnl_ops->getbcncfg) return -EOPNOTSUPP; ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest); if (ret) return ret; bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN); if (!bcn_nest) return -EMSGSIZE; if (bcn_tb[DCB_BCN_ATTR_ALL]) getall = true; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, &value_byte); ret = nla_put_u8(skb, i, value_byte); if (ret) goto err_bcn; } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcncfg(netdev, i, &value_integer); ret = nla_put_u32(skb, i, value_integer); if (ret) goto err_bcn; } nla_nest_end(skb, bcn_nest); return 0; err_bcn: nla_nest_cancel(skb, bcn_nest); return ret; } static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; int i; int ret; u8 value_byte; u32 value_int; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->setbcncfg || !netdev->dcbnl_ops->setbcnrp) return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest); if (ret) return ret; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (data[i] == NULL) continue; value_byte = nla_get_u8(data[i]); netdev->dcbnl_ops->setbcnrp(netdev, data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (data[i] == NULL) continue; value_int = nla_get_u32(data[i]); netdev->dcbnl_ops->setbcncfg(netdev, i, value_int); } return nla_put_u8(skb, DCB_ATTR_BCN, 0); } static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, int app_nested_type, int app_info_type, int app_entry_type) { struct dcb_peer_app_info info; struct dcb_app *table = NULL; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; u16 app_count; int err; /** * retrieve the peer app configuration form the driver. 
If the driver * handlers fail exit without doing anything */ err = ops->peer_getappinfo(netdev, &info, &app_count); if (!err && app_count) { table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL); if (!table) return -ENOMEM; err = ops->peer_getapptable(netdev, table); } if (!err) { u16 i; struct nlattr *app; /** * build the message, from here on the only possible failure * is due to the skb size */ err = -EMSGSIZE; app = nla_nest_start(skb, app_nested_type); if (!app) goto nla_put_failure; if (app_info_type && nla_put(skb, app_info_type, sizeof(info), &info)) goto nla_put_failure; for (i = 0; i < app_count; i++) { if (nla_put(skb, app_entry_type, sizeof(struct dcb_app), &table[i])) goto nla_put_failure; } nla_nest_end(skb, app); } err = 0; nla_put_failure: kfree(table); return err; } /* Handle IEEE 802.1Qaz GET commands. */ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *ieee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx; int err; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) return -EMSGSIZE; ieee = nla_nest_start(skb, DCB_ATTR_IEEE); if (!ieee) return -EMSGSIZE; if (ops->ieee_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, sizeof(maxrate), &maxrate); if (err) return -EMSGSIZE; } } if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); if (!app) return -EMSGSIZE; spin_lock(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), &itr->app); if (err) { spin_unlock(&dcb_lock); return -EMSGSIZE; } } } if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock(&dcb_lock); nla_nest_end(skb, app); /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_APP_UNSPEC, DCB_ATTR_IEEE_APP); if (err) return -EMSGSIZE; } nla_nest_end(skb, ieee); if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); if (err) return -EMSGSIZE; } return 0; } static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, int dir) { u8 pgid, up_map, prio, tc_pct; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int i = dir ? 
DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; struct nlattr *pg = nla_nest_start(skb, i); if (!pg) return -EMSGSIZE; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { struct nlattr *tc_nest = nla_nest_start(skb, i); if (!tc_nest) return -EMSGSIZE; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); else ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) return -EMSGSIZE; nla_nest_end(skb, tc_nest); } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); if (nla_put_u8(skb, i, tc_pct)) return -EMSGSIZE; } nla_nest_end(skb, pg); return 0; } static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *cee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx, i, err = -EMSGSIZE; u8 value; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) goto nla_put_failure; cee = nla_nest_start(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; /* local pg */ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { err = dcbnl_cee_pg_fill(skb, netdev, 1); if (err) goto nla_put_failure; } if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { err = dcbnl_cee_pg_fill(skb, netdev, 0); if (err) goto nla_put_failure; } /* local pfc */ if (ops->getpfccfg) { struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC); if (!pfc_nest) goto nla_put_failure; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); if (nla_put_u8(skb, i, value)) goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } /* local app */ spin_lock(&dcb_lock); app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { struct nlattr *app_nest = nla_nest_start(skb, DCB_ATTR_APP); if (!app_nest) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, itr->app.selector); if (err) goto dcb_unlock; err = nla_put_u16(skb, DCB_APP_ATTR_ID, itr->app.protocol); if (err) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, itr->app.priority); if (err) goto dcb_unlock; nla_nest_end(skb, app_nest); } } nla_nest_end(skb, app); if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT); if (!feat) goto nla_put_failure; for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) if (!ops->getfeatcfg(netdev, i, &value) && nla_put_u8(skb, i, value)) goto nla_put_failure; nla_nest_end(skb, feat); } /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; memset(&pg, 0, sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) goto nla_put_failure; } if (ops->cee_peer_getpfc) { struct cee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, 
DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_CEE_PEER_APP_TABLE, DCB_ATTR_CEE_PEER_APP_INFO, DCB_ATTR_CEE_PEER_APP); if (err) goto nla_put_failure; } nla_nest_end(skb, cee); /* DCBX state */ if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); if (err) goto nla_put_failure; } return 0; dcb_unlock: spin_unlock(&dcb_lock); nla_put_failure: return err; } static int dcbnl_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid, int dcbx_ver) { struct net *net = dev_net(dev); struct sk_buff *skb; struct nlmsghdr *nlh; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int err; if (!ops) return -EOPNOTSUPP; skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh); if (!skb) return -ENOBUFS; if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) err = dcbnl_ieee_fill(skb, dev); else err = dcbnl_cee_fill(skb, dev); if (err < 0) { /* Report error to broadcast listeners */ nlmsg_free(skb); rtnl_set_sk_err(net, RTNLGRP_DCB, err); } else { /* End nlmsg and notify broadcast listeners */ nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL); } return err; } int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid) { return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE); } EXPORT_SYMBOL(dcbnl_ieee_notify); int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid) { return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE); } EXPORT_SYMBOL(dcbnl_cee_notify); /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not * be completed the entire msg is aborted and error value is returned. * No attempt is made to reconcile the case where only part of the * cmd can be completed. 
*/ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int err; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); if (err) return err; if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); err = ops->ieee_setets(netdev, ets); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { struct ieee_maxrate *maxrate = nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); err = ops->ieee_setmaxrate(netdev, maxrate); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { struct nlattr *attr; int rem; nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { struct dcb_app *app_data; if (nla_type(attr) != DCB_ATTR_IEEE_APP) continue; app_data = nla_data(attr); if (ops->ieee_setapp) err = ops->ieee_setapp(netdev, app_data); else err = dcb_ieee_setapp(netdev, app_data); if (err) goto err; } } err: err = nla_put_u8(skb, DCB_ATTR_IEEE, err); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); return err; } static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; if (!ops) return -EOPNOTSUPP; return dcbnl_ieee_fill(skb, netdev); } static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int err; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); if (err) return err; if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { struct nlattr *attr; int rem; nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { struct dcb_app *app_data; if (nla_type(attr) != DCB_ATTR_IEEE_APP) continue; app_data = nla_data(attr); if (ops->ieee_delapp) err = ops->ieee_delapp(netdev, app_data); else err = dcb_ieee_delapp(netdev, app_data); if (err) goto err; } } err: err = nla_put_u8(skb, DCB_ATTR_IEEE, err); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); return err; } /* DCBX configuration */ static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getdcbx) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_DCBX, netdev->dcbnl_ops->getdcbx(netdev)); } static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!netdev->dcbnl_ops->setdcbx) return -EOPNOTSUPP; if (!tb[DCB_ATTR_DCBX]) return -EINVAL; value = nla_get_u8(tb[DCB_ATTR_DCBX]); return nla_put_u8(skb, DCB_ATTR_DCBX, netdev->dcbnl_ops->setdcbx(netdev, value)); } static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; u8 value; int ret, i; int getall = 0; if (!netdev->dcbnl_ops->getfeatcfg) return -EOPNOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = 
nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest); if (ret) return ret; nest = nla_nest_start(skb, DCB_ATTR_FEATCFG); if (!nest) return -EMSGSIZE; if (data[DCB_FEATCFG_ATTR_ALL]) getall = 1; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); if (!ret) ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); goto nla_put_failure; } } nla_nest_end(skb, nest); nla_put_failure: return ret; } static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; int ret, i; u8 value; if (!netdev->dcbnl_ops->setfeatcfg) return -ENOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest); if (ret) goto err; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); if (ret) goto err; } err: ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret); return ret; } /* Handle CEE DCBX GET commands. */ static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; if (!ops) return -EOPNOTSUPP; return dcbnl_cee_fill(skb, netdev); } struct reply_func { /* reply netlink message type */ int type; /* function to fill message contents */ int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); }; static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate }, [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate }, [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg }, [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg }, [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr }, [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap }, [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs }, [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs }, [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate }, [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate }, [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp }, [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp }, [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg }, [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg }, [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg }, [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg }, [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall }, [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg }, [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg }, [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get }, [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set }, [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del }, [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx }, [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx }, [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg }, [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg }, [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, }; static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct net_device *netdev; struct dcbmsg *dcb = nlmsg_data(nlh); struct nlattr *tb[DCB_ATTR_MAX + 1]; u32 portid = skb ? 
NETLINK_CB(skb).portid : 0; int ret = -EINVAL; struct sk_buff *reply_skb; struct nlmsghdr *reply_nlh = NULL; const struct reply_func *fn; if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN)) return -EPERM; ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, dcbnl_rtnl_policy); if (ret < 0) return ret; if (dcb->cmd > DCB_CMD_MAX) return -EINVAL; /* check if a reply function has been defined for the command */ fn = &reply_funcs[dcb->cmd]; if (!fn->cb) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; netdev = dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME])); if (!netdev) return -ENODEV; if (!netdev->dcbnl_ops) { ret = -EOPNOTSUPP; goto out; } reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq, nlh->nlmsg_flags, &reply_nlh); if (!reply_skb) { ret = -ENOBUFS; goto out; } ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); if (ret < 0) { nlmsg_free(reply_skb); goto out; } nlmsg_end(reply_skb, reply_nlh); ret = rtnl_unicast(reply_skb, net, portid); out: dev_put(netdev); return ret; } static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, int ifindex, int prio) { struct dcb_app_type *itr; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == ifindex && (!prio || itr->app.priority == prio)) return itr; } return NULL; } static int dcb_app_add(const struct dcb_app *app, int ifindex) { struct dcb_app_type *entry; entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; memcpy(&entry->app, app, sizeof(*app)); entry->ifindex = ifindex; list_add(&entry->list, &dcb_app_list); return 0; } /** * dcb_getapp - retrieve the DCBX application user priority * * On success returns a non-zero 802.1p user priority bitmap * otherwise returns 0 as the invalid user priority bitmap to * indicate an error. */ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) { struct dcb_app_type *itr; u8 prio = 0; spin_lock(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio = itr->app.priority; spin_unlock(&dcb_lock); return prio; } EXPORT_SYMBOL(dcb_getapp); /** * dcb_setapp - add CEE dcb application data to app list * * Priority 0 is an invalid priority in CEE spec. This routine * removes applications from the app list if the priority is * set to zero. */ int dcb_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type *itr; struct dcb_app_type event; int err = 0; event.ifindex = dev->ifindex; memcpy(&event.app, new, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and replace */ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { if (new->priority) itr->app.priority = new->priority; else { list_del(&itr->list); kfree(itr); } goto out; } /* App type does not exist add new application type */ if (new->priority) err = dcb_app_add(new, dev->ifindex); out: spin_unlock(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_setapp); /** * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority * * Helper routine which on success returns a non-zero 802.1Qaz user * priority bitmap otherwise returns 0 to indicate the dcb_app was * not found in APP list. 
*/ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) { struct dcb_app_type *itr; u8 prio = 0; spin_lock(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio |= 1 << itr->app.priority; spin_unlock(&dcb_lock); return prio; } EXPORT_SYMBOL(dcb_ieee_getapp_mask); /** * dcb_ieee_setapp - add IEEE dcb application data to app list * * This adds Application data to the list. Multiple application * entries may exists for the same selector and protocol as long * as the priorities are different. */ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type event; int err = 0; event.ifindex = dev->ifindex; memcpy(&event.app, new, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and abort if found */ if (dcb_app_lookup(new, dev->ifindex, new->priority)) { err = -EEXIST; goto out; } err = dcb_app_add(new, dev->ifindex); out: spin_unlock(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_setapp); /** * dcb_ieee_delapp - delete IEEE dcb application data from list * * This removes a matching APP data from the APP list */ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) { struct dcb_app_type *itr; struct dcb_app_type event; int err = -ENOENT; event.ifindex = dev->ifindex; memcpy(&event.app, del, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock(&dcb_lock); /* Search for existing match and remove it. */ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { list_del(&itr->list); kfree(itr); err = 0; } spin_unlock(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_delapp); static void dcb_flushapp(void) { struct dcb_app_type *app; struct dcb_app_type *tmp; spin_lock(&dcb_lock); list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { list_del(&app->list); kfree(app); } spin_unlock(&dcb_lock); } static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); return 0; } module_init(dcbnl_init); static void __exit dcbnl_exit(void) { rtnl_unregister(PF_UNSPEC, RTM_GETDCB); rtnl_unregister(PF_UNSPEC, RTM_SETDCB); dcb_flushapp(); } module_exit(dcbnl_exit);
./CrossVul/dataset_final_sorted/CWE-399/c/good_5670_0
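/*
 * Editor's illustrative sketch (not part of the file above): how a driver
 * might use the APP-table helpers that this dcbnl code exports.  The function
 * names example_add_fcoe_app() and example_fcoe_prio_mask() are hypothetical,
 * and ETH_P_FCOE is used only as a sample protocol identifier; dcb_ieee_setapp(),
 * dcb_ieee_getapp_mask() and struct dcb_app are the real interfaces
 * implemented above.
 */
#include <linux/if_ether.h>
#include <net/dcbnl.h>

static int example_add_fcoe_app(struct net_device *dev, u8 prio)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ETH_P_FCOE,
		.priority = prio,
	};

	/* Adds the entry to dcb_app_list and fires DCB_APP_EVENT on success. */
	return dcb_ieee_setapp(dev, &app);
}

static u8 example_fcoe_prio_mask(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ETH_P_FCOE,
	};

	/* Returns an 802.1p priority bitmap, or 0 if no matching entry exists. */
	return dcb_ieee_getapp_mask(dev, &app);
}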
crossvul-cpp_data_good_3463_0
/* * Monkey's Audio APE demuxer * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org> * based upon libdemac from Dave Chapman. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include "libavutil/intreadwrite.h" #include "avformat.h" #include "apetag.h" #define ENABLE_DEBUG 0 /* The earliest and latest file formats supported by this library */ #define APE_MIN_VERSION 3950 #define APE_MAX_VERSION 3990 #define MAC_FORMAT_FLAG_8_BIT 1 // is 8-bit [OBSOLETE] #define MAC_FORMAT_FLAG_CRC 2 // uses the new CRC32 error detection [OBSOLETE] #define MAC_FORMAT_FLAG_HAS_PEAK_LEVEL 4 // uint32 nPeakLevel after the header [OBSOLETE] #define MAC_FORMAT_FLAG_24_BIT 8 // is 24-bit [OBSOLETE] #define MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS 16 // has the number of seek elements after the peak level #define MAC_FORMAT_FLAG_CREATE_WAV_HEADER 32 // create the wave header on decompression (not stored) #define MAC_SUBFRAME_SIZE 4608 #define APE_EXTRADATA_SIZE 6 typedef struct { int64_t pos; int nblocks; int size; int skip; int64_t pts; } APEFrame; typedef struct { /* Derived fields */ uint32_t junklength; uint32_t firstframe; uint32_t totalsamples; int currentframe; APEFrame *frames; /* Info from Descriptor Block */ char magic[4]; int16_t fileversion; int16_t padding1; uint32_t descriptorlength; uint32_t headerlength; uint32_t seektablelength; uint32_t wavheaderlength; uint32_t audiodatalength; uint32_t audiodatalength_high; uint32_t wavtaillength; uint8_t md5[16]; /* Info from Header Block */ uint16_t compressiontype; uint16_t formatflags; uint32_t blocksperframe; uint32_t finalframeblocks; uint32_t totalframes; uint16_t bps; uint16_t channels; uint32_t samplerate; /* Seektable */ uint32_t *seektable; } APEContext; static int ape_probe(AVProbeData * p) { if (p->buf[0] == 'M' && p->buf[1] == 'A' && p->buf[2] == 'C' && p->buf[3] == ' ') return AVPROBE_SCORE_MAX; return 0; } static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx) { #if ENABLE_DEBUG int i; av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n"); av_log(s, AV_LOG_DEBUG, "magic = \"%c%c%c%c\"\n", ape_ctx->magic[0], ape_ctx->magic[1], ape_ctx->magic[2], ape_ctx->magic[3]); av_log(s, AV_LOG_DEBUG, "fileversion = %d\n", ape_ctx->fileversion); av_log(s, AV_LOG_DEBUG, "descriptorlength = %d\n", ape_ctx->descriptorlength); av_log(s, AV_LOG_DEBUG, "headerlength = %d\n", ape_ctx->headerlength); av_log(s, AV_LOG_DEBUG, "seektablelength = %d\n", ape_ctx->seektablelength); av_log(s, AV_LOG_DEBUG, "wavheaderlength = %d\n", ape_ctx->wavheaderlength); av_log(s, AV_LOG_DEBUG, "audiodatalength = %d\n", ape_ctx->audiodatalength); av_log(s, AV_LOG_DEBUG, "audiodatalength_high = %d\n", ape_ctx->audiodatalength_high); av_log(s, AV_LOG_DEBUG, "wavtaillength = %d\n", ape_ctx->wavtaillength); av_log(s, AV_LOG_DEBUG, "md5 = "); for (i = 0; i < 16; i++) av_log(s, 
AV_LOG_DEBUG, "%02x", ape_ctx->md5[i]); av_log(s, AV_LOG_DEBUG, "\n"); av_log(s, AV_LOG_DEBUG, "\nHeader Block:\n\n"); av_log(s, AV_LOG_DEBUG, "compressiontype = %d\n", ape_ctx->compressiontype); av_log(s, AV_LOG_DEBUG, "formatflags = %d\n", ape_ctx->formatflags); av_log(s, AV_LOG_DEBUG, "blocksperframe = %d\n", ape_ctx->blocksperframe); av_log(s, AV_LOG_DEBUG, "finalframeblocks = %d\n", ape_ctx->finalframeblocks); av_log(s, AV_LOG_DEBUG, "totalframes = %d\n", ape_ctx->totalframes); av_log(s, AV_LOG_DEBUG, "bps = %d\n", ape_ctx->bps); av_log(s, AV_LOG_DEBUG, "channels = %d\n", ape_ctx->channels); av_log(s, AV_LOG_DEBUG, "samplerate = %d\n", ape_ctx->samplerate); av_log(s, AV_LOG_DEBUG, "\nSeektable\n\n"); if ((ape_ctx->seektablelength / sizeof(uint32_t)) != ape_ctx->totalframes) { av_log(s, AV_LOG_DEBUG, "No seektable\n"); } else { for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) { if (i < ape_ctx->totalframes - 1) { av_log(s, AV_LOG_DEBUG, "%8d %d (%d bytes)\n", i, ape_ctx->seektable[i], ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]); } else { av_log(s, AV_LOG_DEBUG, "%8d %d\n", i, ape_ctx->seektable[i]); } } } av_log(s, AV_LOG_DEBUG, "\nFrames\n\n"); for (i = 0; i < ape_ctx->totalframes; i++) av_log(s, AV_LOG_DEBUG, "%8d %8lld %8d (%d samples)\n", i, ape_ctx->frames[i].pos, ape_ctx->frames[i].size, ape_ctx->frames[i].nblocks); av_log(s, AV_LOG_DEBUG, "\nCalculated information:\n\n"); av_log(s, AV_LOG_DEBUG, "junklength = %d\n", ape_ctx->junklength); av_log(s, AV_LOG_DEBUG, "firstframe = %d\n", ape_ctx->firstframe); av_log(s, AV_LOG_DEBUG, "totalsamples = %d\n", ape_ctx->totalsamples); #endif } static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap) { AVIOContext *pb = s->pb; APEContext *ape = s->priv_data; AVStream *st; uint32_t tag; int i; int total_blocks; int64_t pts; /* TODO: Skip any leading junk such as id3v2 tags */ ape->junklength = 0; tag = avio_rl32(pb); if (tag != MKTAG('M', 'A', 'C', ' ')) return -1; ape->fileversion = avio_rl16(pb); if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) { av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10); return -1; } if (ape->fileversion >= 3980) { ape->padding1 = avio_rl16(pb); ape->descriptorlength = avio_rl32(pb); ape->headerlength = avio_rl32(pb); ape->seektablelength = avio_rl32(pb); ape->wavheaderlength = avio_rl32(pb); ape->audiodatalength = avio_rl32(pb); ape->audiodatalength_high = avio_rl32(pb); ape->wavtaillength = avio_rl32(pb); avio_read(pb, ape->md5, 16); /* Skip any unknown bytes at the end of the descriptor. 
This is for future compatibility */ if (ape->descriptorlength > 52) avio_seek(pb, ape->descriptorlength - 52, SEEK_CUR); /* Read header data */ ape->compressiontype = avio_rl16(pb); ape->formatflags = avio_rl16(pb); ape->blocksperframe = avio_rl32(pb); ape->finalframeblocks = avio_rl32(pb); ape->totalframes = avio_rl32(pb); ape->bps = avio_rl16(pb); ape->channels = avio_rl16(pb); ape->samplerate = avio_rl32(pb); } else { ape->descriptorlength = 0; ape->headerlength = 32; ape->compressiontype = avio_rl16(pb); ape->formatflags = avio_rl16(pb); ape->channels = avio_rl16(pb); ape->samplerate = avio_rl32(pb); ape->wavheaderlength = avio_rl32(pb); ape->wavtaillength = avio_rl32(pb); ape->totalframes = avio_rl32(pb); ape->finalframeblocks = avio_rl32(pb); if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) { avio_seek(pb, 4, SEEK_CUR); /* Skip the peak level */ ape->headerlength += 4; } if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) { ape->seektablelength = avio_rl32(pb); ape->headerlength += 4; ape->seektablelength *= sizeof(int32_t); } else ape->seektablelength = ape->totalframes * sizeof(int32_t); if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT) ape->bps = 8; else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT) ape->bps = 24; else ape->bps = 16; if (ape->fileversion >= 3950) ape->blocksperframe = 73728 * 4; else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800 && ape->compressiontype >= 4000)) ape->blocksperframe = 73728; else ape->blocksperframe = 9216; /* Skip any stored wav header */ if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER)) avio_seek(pb, ape->wavheaderlength, SEEK_CUR); } if(!ape->totalframes){ av_log(s, AV_LOG_ERROR, "No frames in the file!\n"); return AVERROR(EINVAL); } if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){ av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes); return -1; } ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame)); if(!ape->frames) return AVERROR(ENOMEM); ape->firstframe = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength; ape->currentframe = 0; ape->totalsamples = ape->finalframeblocks; if (ape->totalframes > 1) ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1); if (ape->seektablelength > 0) { ape->seektable = av_malloc(ape->seektablelength); for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++) ape->seektable[i] = avio_rl32(pb); } ape->frames[0].pos = ape->firstframe; ape->frames[0].nblocks = ape->blocksperframe; ape->frames[0].skip = 0; for (i = 1; i < ape->totalframes; i++) { ape->frames[i].pos = ape->seektable[i]; //ape->frames[i-1].pos + ape->blocksperframe; ape->frames[i].nblocks = ape->blocksperframe; ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos; ape->frames[i].skip = (ape->frames[i].pos - ape->frames[0].pos) & 3; } ape->frames[ape->totalframes - 1].size = ape->finalframeblocks * 4; ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks; for (i = 0; i < ape->totalframes; i++) { if(ape->frames[i].skip){ ape->frames[i].pos -= ape->frames[i].skip; ape->frames[i].size += ape->frames[i].skip; } ape->frames[i].size = (ape->frames[i].size + 3) & ~3; } ape_dumpinfo(s, ape); /* try to read APE tags */ if (!url_is_streamed(pb)) { ff_ape_parse_tag(s); avio_seek(pb, 0, SEEK_SET); } av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10, ape->compressiontype); /* now we are ready: build format streams */ st = 
av_new_stream(s, 0); if (!st) return -1; total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_APE; st->codec->codec_tag = MKTAG('A', 'P', 'E', ' '); st->codec->channels = ape->channels; st->codec->sample_rate = ape->samplerate; st->codec->bits_per_coded_sample = ape->bps; st->codec->frame_size = MAC_SUBFRAME_SIZE; st->nb_frames = ape->totalframes; st->start_time = 0; st->duration = total_blocks / MAC_SUBFRAME_SIZE; av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate); st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE); st->codec->extradata_size = APE_EXTRADATA_SIZE; AV_WL16(st->codec->extradata + 0, ape->fileversion); AV_WL16(st->codec->extradata + 2, ape->compressiontype); AV_WL16(st->codec->extradata + 4, ape->formatflags); pts = 0; for (i = 0; i < ape->totalframes; i++) { ape->frames[i].pts = pts; av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME); pts += ape->blocksperframe / MAC_SUBFRAME_SIZE; } return 0; } static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) { int ret; int nblocks; APEContext *ape = s->priv_data; uint32_t extra_size = 8; if (s->pb->eof_reached) return AVERROR(EIO); if (ape->currentframe > ape->totalframes) return AVERROR(EIO); avio_seek (s->pb, ape->frames[ape->currentframe].pos, SEEK_SET); /* Calculate how many blocks there are in this frame */ if (ape->currentframe == (ape->totalframes - 1)) nblocks = ape->finalframeblocks; else nblocks = ape->blocksperframe; if (av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size) < 0) return AVERROR(ENOMEM); AV_WL32(pkt->data , nblocks); AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip); ret = avio_read(s->pb, pkt->data + extra_size, ape->frames[ape->currentframe].size); pkt->pts = ape->frames[ape->currentframe].pts; pkt->stream_index = 0; /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret + extra_size; ape->currentframe++; return 0; } static int ape_read_close(AVFormatContext * s) { APEContext *ape = s->priv_data; av_freep(&ape->frames); av_freep(&ape->seektable); return 0; } static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { AVStream *st = s->streams[stream_index]; APEContext *ape = s->priv_data; int index = av_index_search_timestamp(st, timestamp, flags); if (index < 0) return -1; ape->currentframe = index; return 0; } AVInputFormat ff_ape_demuxer = { "ape", NULL_IF_CONFIG_SMALL("Monkey's Audio"), sizeof(APEContext), ape_probe, ape_read_header, ape_read_packet, ape_read_close, ape_read_seek, .extensions = "ape,apl,mac" };
./CrossVul/dataset_final_sorted/CWE-399/c/good_3463_0
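/*
 * Editor's illustrative sketch (not part of the demuxer above): a minimal
 * caller that exercises ff_ape_demuxer through the public libavformat API of
 * this era.  example_read_ape() and its error-handling style are hypothetical;
 * av_open_input_file(), av_read_frame() and av_close_input_file() are the
 * real entry points that end up invoking ape_read_header(), ape_read_packet()
 * and ape_read_close() above.
 */
#include <libavformat/avformat.h>

static int example_read_ape(const char *path)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();                          /* registers ff_ape_demuxer among others */

    ret = av_open_input_file(&ic, path, NULL, 0, NULL); /* probe + ape_read_header() */
    if (ret < 0)
        return ret;

    while (av_read_frame(ic, &pkt) >= 0) {      /* each call maps to ape_read_packet() */
        /* pkt.data begins with the nblocks and skip words written by the demuxer */
        av_free_packet(&pkt);
    }

    av_close_input_file(ic);                    /* ape_read_close() frees frames/seektable */
    return 0;
}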
crossvul-cpp_data_good_3486_5
/* * linux/arch/arm/kernel/swp_emulate.c * * Copyright (C) 2009 ARM Limited * __user_* functions adapted from include/asm/uaccess.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Implements emulation of the SWP/SWPB instructions using load-exclusive and * store-exclusive for processors that have them disabled (or future ones that * might not implement them). * * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] * Where: Rt = destination * Rt2 = source * Rn = address */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/perf_event.h> #include <asm/traps.h> #include <asm/uaccess.h> /* * Error-checking SWP macros implemented using ldrex{b}/strex{b} */ #define __user_swpX_asm(data, addr, res, temp, B) \ __asm__ __volatile__( \ " mov %2, %1\n" \ "0: ldrex"B" %1, [%3]\n" \ "1: strex"B" %0, %2, [%3]\n" \ " cmp %0, #0\n" \ " movne %0, %4\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %5\n" \ " b 2b\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 0b, 3b\n" \ " .long 1b, 3b\n" \ " .previous" \ : "=&r" (res), "+r" (data), "=&r" (temp) \ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ : "cc", "memory") #define __user_swp_asm(data, addr, res, temp) \ __user_swpX_asm(data, addr, res, temp, "") #define __user_swpb_asm(data, addr, res, temp) \ __user_swpX_asm(data, addr, res, temp, "b") /* * Macros/defines for extracting register numbers from instruction. */ #define EXTRACT_REG_NUM(instruction, offset) \ (((instruction) & (0xf << (offset))) >> (offset)) #define RN_OFFSET 16 #define RT_OFFSET 12 #define RT2_OFFSET 0 /* * Bit 22 of the instruction encoding distinguishes between * the SWP and SWPB variants (bit set means SWPB). */ #define TYPE_SWPB (1 << 22) static unsigned long swpcounter; static unsigned long swpbcounter; static unsigned long abtcounter; static pid_t previous_pid; #ifdef CONFIG_PROC_FS static int proc_read_status(char *page, char **start, off_t off, int count, int *eof, void *data) { char *p = page; int len; p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter); p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter); p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter); if (previous_pid != 0) p += sprintf(p, "Last process:\t\t%d\n", previous_pid); len = (p - page) - off; if (len < 0) len = 0; *eof = (len <= count) ? 1 : 0; *start = page + off; return len; } #endif /* * Set up process info to signal segmentation fault - called on access error. */ static void set_segfault(struct pt_regs *regs, unsigned long addr) { siginfo_t info; if (find_vma(current->mm, addr) == NULL) info.si_code = SEGV_MAPERR; else info.si_code = SEGV_ACCERR; info.si_signo = SIGSEGV; info.si_errno = 0; info.si_addr = (void *) instruction_pointer(regs); pr_debug("SWP{B} emulation: access caused memory abort!\n"); arm_notify_die("Illegal memory access", regs, &info, 0, 0); abtcounter++; } static int emulate_swpX(unsigned int address, unsigned int *data, unsigned int type) { unsigned int res = 0; if ((type != TYPE_SWPB) && (address & 0x3)) { /* SWP to unaligned address not permitted */ pr_debug("SWP instruction on unaligned pointer!\n"); return -EFAULT; } while (1) { unsigned long temp; /* * Barrier required between accessing protected resource and * releasing a lock for it. 
Legacy code might not have done * this, and we cannot determine that this is not the case * being emulated, so insert always. */ smp_mb(); if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else __user_swp_asm(*data, address, res, temp); if (likely(res != -EAGAIN) || signal_pending(current)) break; cond_resched(); } if (res == 0) { /* * Barrier also required between acquiring a lock for a * protected resource and accessing the resource. Inserted for * same reason as above. */ smp_mb(); if (type == TYPE_SWPB) swpbcounter++; else swpcounter++; } return res; } /* * swp_handler logs the id of calling process, dissects the instruction, sanity * checks the memory location, calls emulate_swpX for the actual operation and * deals with fixup/error handling before returning */ static int swp_handler(struct pt_regs *regs, unsigned int instr) { unsigned int address, destreg, data, type; unsigned int res = 0; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); if (current->pid != previous_pid) { pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", current->comm, (unsigned long)current->pid); previous_pid = current->pid; } address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)]; data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)]; destreg = EXTRACT_REG_NUM(instr, RT_OFFSET); type = instr & TYPE_SWPB; pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n", EXTRACT_REG_NUM(instr, RN_OFFSET), address, destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p not allowed!\n", (void *)address); res = -EFAULT; } else { res = emulate_swpX(address, &data, type); } if (res == 0) { /* * On successful emulation, revert the adjustment to the PC * made in kernel/traps.c in order to resume execution at the * instruction following the SWP{B}. */ regs->ARM_pc += 4; regs->uregs[destreg] = data; } else if (res == -EFAULT) { /* * Memory errors do not mean emulation failed. * Set up signal info to return SEGV, then return OK */ set_segfault(regs, address); } return 0; } /* * Only emulate SWP/SWPB executed in ARM state/User mode. * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE. */ static struct undef_hook swp_hook = { .instr_mask = 0x0fb00ff0, .instr_val = 0x01000090, .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT, .cpsr_val = USR_MODE, .fn = swp_handler }; /* * Register handler and create status file in /proc/cpu * Invoked as late_initcall, since not needed before init spawned. */ static int __init swp_emulation_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *res; res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL); if (!res) return -ENOMEM; res->read_proc = proc_read_status; #endif /* CONFIG_PROC_FS */ printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n"); register_undef_hook(&swp_hook); return 0; } late_initcall(swp_emulation_init);
./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_5
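/*
 * Editor's illustrative sketch (not part of the file above): a legacy-style
 * user-space atomic swap built on the deprecated SWP instruction.  On cores
 * where SWP/SWPB is disabled, executing this traps as an undefined
 * instruction and is handled by swp_handler() above.  The helper name
 * legacy_swp() and its use are hypothetical.
 */
static inline unsigned int legacy_swp(unsigned int newval, unsigned int *addr)
{
	unsigned int oldval;

	__asm__ __volatile__(
		"swp	%0, %1, [%2]"		/* oldval = *addr; *addr = newval */
		: "=&r" (oldval)
		: "r" (newval), "r" (addr)
		: "memory");

	return oldval;
}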
crossvul-cpp_data_good_5721_2
/* * UDP over IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/ipv4/udp.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/raw.h> #include <net/tcp_states.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> #include <net/inet6_hashtables.h> #include <net/ll_poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <trace/events/skb.h> #include "udp_impl.h" int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); __be32 sk1_rcv_saddr = sk_rcv_saddr(sk); __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); int sk_ipv6only = ipv6_only_sock(sk); int sk2_ipv6only = inet_v6_ipv6only(sk2); int addr_type = ipv6_addr_type(sk_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? 
ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) return (!sk2_ipv6only && (!sk1_rcv_saddr || !sk2_rcv_saddr || sk1_rcv_saddr == sk2_rcv_saddr)); if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) return 1; if (addr_type == IPV6_ADDR_ANY && !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) return 1; if (sk2_rcv_saddr6 && ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6)) return 1; return 0; } static unsigned int udp6_portaddr_hash(struct net *net, const struct in6_addr *addr6, unsigned int port) { unsigned int hash, mix = net_hash_mix(net); if (ipv6_addr_any(addr6)) hash = jhash_1word(0, mix); else if (ipv6_addr_v4mapped(addr6)) hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); else hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); return hash ^ port; } int udp_v6_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); unsigned int hash2_partial = udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); } static void udp_v6_rehash(struct sock *sk) { u16 new_hash = udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } static inline int compute_score(struct sock *sk, struct net *net, unsigned short hnum, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); score = 0; if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&np->rcv_saddr)) { if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score++; } if (!ipv6_addr_any(&np->daddr)) { if (!ipv6_addr_equal(&np->daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } } return score; } #define SCORE2_MAX (1 + 1 + 1) static inline int compute_score2(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score = 0; if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&np->daddr)) { if (!ipv6_addr_equal(&np->daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } } return score; } /* called with read_rcu_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned int hnum, int dif, struct udp_hslot *hslot2, unsigned int slot2) { struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; begin: result = NULL; badness = -1; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { result = sk; badness 
= score; reuseport = sk->sk_reuseport; if (reuseport) { hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } else if (score == SCORE2_MAX) goto exact_match; } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot2) goto begin; if (result) { exact_match: if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, daddr, hnum, dif) < badness)) { sock_put(result); goto begin; } } return result; } struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = -1; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, hnum, saddr, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp6_lib_lookup); static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; const struct ipv6hdr *iph = ipv6_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable); } struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp6_lib_lookup); /* * This should be easy, if there is something there we * return it, otherwise we block. 
*/ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); int is_udp4; bool slow; if (addr_len) *addr_len = sizeof(struct sockaddr_in6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; is_udp4 = (skb->protocol == htons(ETH_P_IP)); /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udpv6_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) { if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); } sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. 
*/ if (msg->msg_name) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = udp_hdr(skb)->source; sin6->sin6_flowinfo = 0; if (is_udp4) { ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); sin6->sin6_scope_id = 0; } else { sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); } } if (is_udp4) { if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } else { if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); } err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } else { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; } void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info, struct udp_table *udptable) { struct ipv6_pinfo *np; const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct in6_addr *saddr = &hdr->saddr; const struct in6_addr *daddr = &hdr->daddr; struct udphdr *uh = (struct udphdr*)(skb->data+offset); struct sock *sk; int err; sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest, saddr, uh->source, inet6_iif(skb), udptable); if (sk == NULL) return; if (type == ICMPV6_PKT_TOOBIG) ip6_sk_update_pmtu(skb, sk, info); if (type == NDISC_REDIRECT) ip6_sk_redirect(skb, sk); np = inet6_sk(sk); if (!icmpv6_err_convert(type, code, &err) && !np->recverr) goto out; if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) goto out; if (np->recverr) ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) sock_rps_save_rxhash(sk, skb); rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } return 0; } static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info ) { __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } static struct static_key udpv6_encap_needed __read_mostly; void udpv6_encap_enable(void) { if (!static_key_enabled(&udpv6_encap_needed)) static_key_slow_inc(&udpv6_encap_needed); } EXPORT_SYMBOL(udpv6_encap_enable); int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; if (static_key_false(&udpv6_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. 
* up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { int ret; ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage" " %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d " "too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter)) { if (udp_lib_checksum_complete(skb)) goto csum_error; } if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; skb_dst_drop(skb); bh_lock_sock(sk); rc = 0; if (!sock_owned_by_user(sk)) rc = __udpv6_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); return rc; csum_error: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, const struct in6_addr *loc_addr, __be16 rmt_port, const struct in6_addr *rmt_addr, int dif) { struct hlist_nulls_node *node; struct sock *s = sk; unsigned short num = ntohs(loc_port); sk_nulls_for_each_from(s, node) { struct inet_sock *inet = inet_sk(s); if (!net_eq(sock_net(s), net)) continue; if (udp_sk(s)->udp_port_hash == num && s->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(s); if (inet->inet_dport) { if (inet->inet_dport != rmt_port) continue; } if (!ipv6_addr_any(&np->daddr) && !ipv6_addr_equal(&np->daddr, rmt_addr)) continue; if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif) continue; if (!ipv6_addr_any(&np->rcv_saddr)) { if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr)) continue; } if (!inet6_mc_check(s, loc_addr, rmt_addr)) continue; return s; } } return NULL; } static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { struct sk_buff *skb1 = NULL; struct sock *sk; unsigned int i; for (i = 0; i < count; i++) { sk = stack[i]; if (likely(skb1 == NULL)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { atomic_inc(&sk->sk_drops); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) skb1 = NULL; } if (unlikely(skb1)) kfree_skb(skb1); } /* * Note: called only from the BH handler context, * so we don't need to lock the hashes. 
*/ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = inet6_iif(skb); sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing the lock, we must take reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; } int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct net *net = dev_net(skb->dev); struct sock *sk; struct udphdr *uh; const struct in6_addr *saddr, *daddr; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto discard; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. */ /* Check for jumbo payload */ if (ulen == 0) ulen = skb->len; if (ulen < sizeof(*uh)) goto short_packet; if (ulen < skb->len) { if (pskb_trim_rcsum(skb, ulen)) goto short_packet; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); } } if (udp6_csum_init(skb, uh, proto)) goto csum_error; /* * Multicast receive code */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, saddr, daddr, udptable); /* Unicast */ /* * check socket cache ... must talk to Alan about his plans * for sock caches... i'll skip this for now. */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk != NULL) { int ret; sk_mark_ll(sk, skb); ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; if (udp_lib_checksum_complete(skb)) goto csum_error; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", proto == IPPROTO_UDPLITE ? "-Lite" : "", saddr, ntohs(uh->source), ulen, skb->len, daddr, ntohs(uh->dest)); goto discard; csum_error: UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } static __inline__ int udpv6_rcv(struct sk_buff *skb) { return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); } /* * Throw away all pending data and cancel the corking. Socket is locked. 
*/ static void udp_v6_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending == AF_INET) udp_flush_pending_frames(sk); else if (up->pending) { up->len = 0; up->pending = 0; ip6_flush_pending_frames(sk); } } /** * udp6_hwcsum_outgoing - handle outgoing HW checksumming * @sk: socket we are sending on * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) */ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, int len) { unsigned int offset; struct udphdr *uh = udp_hdr(skb); __wsum csum = 0; if (skb_queue_len(&sk->sk_write_queue) == 1) { /* Only one fragment on the socket. */ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); } else { /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); skb->ip_summed = CHECKSUM_NONE; skb_queue_walk(&sk->sk_write_queue, skb) { csum = csum_add(csum, skb->csum); } uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } /* * Sending */ static int udp_v6_push_pending_frames(struct sock *sk) { struct sk_buff *skb; struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; if (up->pending == AF_INET) return udp_push_pending_frames(sk); fl6 = &inet->cork.fl.u.ip6; /* Grab the skbuff where UDP header space exists. 
*/ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = fl6->fl6_sport; uh->dest = fl6->fl6_dport; uh->len = htons(up->len); uh->check = 0; if (is_udplite) csum = udplite_csum_outgoing(sk, skb); else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, up->len); goto send; } else csum = udp_csum_outgoing(sk, skb); /* add protocol-dependent pseudo-header */ uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, up->len, fl6->flowi6_proto, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip6_push_pending_frames(sk); if (err) { if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); out: up->len = 0; up->pending = 0; return err; } int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; struct in6_addr *daddr, *final_p, final; struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct flowi6 fl6; struct dst_entry *dst; int addr_len = msg->msg_namelen; int ulen = len; int hlimit = -1; int tclass = -1; int dontfrag = -1; int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int err; int connected = 0; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); /* destination address check */ if (sin6) { if (addr_len < offsetof(struct sockaddr, sa_data)) return -EINVAL; switch (sin6->sin6_family) { case AF_INET6: if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; daddr = &sin6->sin6_addr; break; case AF_INET: goto do_udp_sendmsg; case AF_UNSPEC: msg->msg_name = sin6 = NULL; msg->msg_namelen = addr_len = 0; daddr = NULL; break; default: return -EINVAL; } } else if (!up->pending) { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = &np->daddr; } else daddr = NULL; if (daddr) { if (ipv6_addr_v4mapped(daddr)) { struct sockaddr_in sin; sin.sin_family = AF_INET; sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; sin.sin_addr.s_addr = daddr->s6_addr32[3]; msg->msg_name = &sin; msg->msg_namelen = sizeof(sin); do_udp_sendmsg: if (__ipv6_only_sock(sk)) return -ENETUNREACH; return udp_sendmsg(iocb, sk, msg, len); } } if (up->pending == AF_INET) return udp_sendmsg(iocb, sk, msg, len); /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). */ if (len > INT_MAX - sizeof(struct udphdr)) return -EMSGSIZE; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET6)) { release_sock(sk); return -EAFNOSUPPORT; } dst = NULL; goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); memset(&fl6, 0, sizeof(fl6)); if (sin6) { if (sin6->sin6_port == 0) return -EINVAL; fl6.fl6_dport = sin6->sin6_port; daddr = &sin6->sin6_addr; if (np->sndflow) { fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; daddr = &flowlabel->dst; } } /* * Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &np->daddr)) daddr = &np->daddr; if (addr_len >= sizeof(struct sockaddr_in6) && sin6->sin6_scope_id && __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) fl6.flowi6_oif = sin6->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; fl6.fl6_dport = inet->inet_dport; daddr = &np->daddr; fl6.flowlabel = np->flow_label; connected = 1; } if (!fl6.flowi6_oif) fl6.flowi6_oif = sk->sk_bound_dev_if; if (!fl6.flowi6_oif) fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; fl6.flowi6_mark = sk->sk_mark; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; } if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; connected = 0; } if (opt == NULL) opt = np->opt; if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) fl6.daddr = *daddr; else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) fl6.saddr = np->saddr; fl6.fl6_sport = inet->inet_sport; final_p = fl6_update_dst(&fl6, opt, &final); if (final_p) connected = 0; if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { fl6.flowi6_oif = np->mcast_oif; connected = 0; } else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; goto out; } if (hlimit < 0) { if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); } if (tclass < 0) tclass = np->tclass; if (dontfrag < 0) dontfrag = np->dontfrag; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); err = -EINVAL; goto out; } up->pending = AF_INET6; do_append_data: up->len += ulen; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info*)dst, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) err = udp_v6_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; if (dst) { if (connected) { ip6_dst_store(sk, dst, ipv6_addr_equal(&fl6.daddr, &np->daddr) ? &np->daddr : NULL, #ifdef CONFIG_IPV6_SUBTREES ipv6_addr_equal(&fl6.saddr, &np->saddr) ? &np->saddr : #endif NULL); } else { dst_release(dst); } dst = NULL; } if (err > 0) err = np->recverr ? net_xmit_errno(err) : 0; release_sock(sk); out: dst_release(dst); fl6_sock_release(flowlabel); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. 
Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: dst_confirm(dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } void udpv6_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); lock_sock(sk); udp_v6_flush_pending_frames(sk); release_sock(sk); if (static_key_false(&udpv6_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } inet6_destroy_sock(sk); } /* * Socket option code for UDP */ int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); } #endif int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return ipv6_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } #endif static const struct inet6_protocol udpv6_protocol = { .handler = udpv6_rcv, .err_handler = udpv6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS int udp6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); } else { int bucket = ((struct udp_iter_state *)seq->private)->bucket; struct inet_sock *inet = inet_sk(v); __u16 srcp = ntohs(inet->inet_sport); __u16 destp = ntohs(inet->inet_dport); ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); } return 0; } static const struct file_operations udp6_afinfo_seq_fops = { .owner = THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct udp_seq_afinfo udp6_seq_afinfo = { .name = "udp6", .family = AF_INET6, .udp_table = &udp_table, .seq_fops = &udp6_afinfo_seq_fops, .seq_ops = { .show = udp6_seq_show, }, }; int __net_init udp6_proc_init(struct net *net) { return udp_proc_register(net, &udp6_seq_afinfo); } void udp6_proc_exit(struct net *net) { udp_proc_unregister(net, &udp6_seq_afinfo); } #endif /* CONFIG_PROC_FS */ void udp_v6_clear_sk(struct sock *sk, int size) { struct inet_sock *inet = inet_sk(sk); /* we do not want to clear pinet6 field, because of RCU lookups */ sk_prot_clear_portaddr_nulls(sk, 
		offsetof(struct inet_sock, pinet6));
	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name		   = "UDPv6",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip6_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udpv6_destroy_sock,
	.setsockopt	   = udpv6_setsockopt,
	.getsockopt	   = udpv6_getsockopt,
	.sendmsg	   = udpv6_sendmsg,
	.recvmsg	   = udpv6_recvmsg,
	.backlog_rcv	   = __udpv6_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v6_rehash,
	.get_port	   = udp_v6_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udpv6_setsockopt,
	.compat_getsockopt = compat_udpv6_getsockopt,
#endif
	.clear_sk	   = udp_v6_clear_sk,
};

static struct inet_protosw udpv6_protosw = {
	.type     = SOCK_DGRAM,
	.protocol = IPPROTO_UDP,
	.prot     = &udpv6_prot,
	.ops      = &inet6_dgram_ops,
	.no_check = UDP_CSUM_DEFAULT,
	.flags    = INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
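/*
 * Illustrative user-space sketch, not part of this file: how the corking
 * path above is typically driven.  A sendto() with MSG_MORE makes
 * udpv6_sendmsg() set up->pending and append to the write queue via
 * ip6_append_data(); the next send without MSG_MORE triggers
 * udp_v6_push_pending_frames(), which fills in the UDP header and checksum
 * and emits one combined datagram.  The address, port and the minimal error
 * handling below are example-only simplifications.
 */
#if 0	/* example only -- not compiled as part of this kernel file */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int send_two_part_datagram(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(9999);			/* example port */
	inet_pton(AF_INET6, "::1", &dst.sin6_addr);	/* example address */

	/* First part: MSG_MORE corks the socket, nothing is sent yet. */
	sendto(fd, "hello ", 6, MSG_MORE,
	       (struct sockaddr *)&dst, sizeof(dst));
	/* Second part: no MSG_MORE, so one "hello world" datagram goes out. */
	sendto(fd, "world", 5, 0,
	       (struct sockaddr *)&dst, sizeof(dst));

	close(fd);
	return 0;
}
#endif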
./CrossVul/dataset_final_sorted/CWE-399/c/good_5721_2
crossvul-cpp_data_good_2297_2
/* * Copyright (c) Ian F. Darwin 1986-1995. * Software written by Ian F. Darwin and others; * maintained 1995-present by Christos Zoulas and others. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * softmagic - interpret variable magic from MAGIC */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: softmagic.c,v 1.197 2014/11/11 17:48:23 christos Exp $") #endif /* lint */ #include "magic.h" #include <assert.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <time.h> private int match(struct magic_set *, struct magic *, uint32_t, const unsigned char *, size_t, size_t, int, int, int, int, int *, int *, int *); private int mget(struct magic_set *, const unsigned char *, struct magic *, size_t, size_t, unsigned int, int, int, int, int, int *, int *, int *); private int magiccheck(struct magic_set *, struct magic *); private int32_t mprint(struct magic_set *, struct magic *); private int32_t moffset(struct magic_set *, struct magic *); private void mdebug(uint32_t, const char *, size_t); private int mcopy(struct magic_set *, union VALUETYPE *, int, int, const unsigned char *, uint32_t, size_t, struct magic *); private int mconvert(struct magic_set *, struct magic *, int); private int print_sep(struct magic_set *, int); private int handle_annotation(struct magic_set *, struct magic *); private void cvt_8(union VALUETYPE *, const struct magic *); private void cvt_16(union VALUETYPE *, const struct magic *); private void cvt_32(union VALUETYPE *, const struct magic *); private void cvt_64(union VALUETYPE *, const struct magic *); #define OFFSET_OOB(n, o, i) ((n) < (o) || (i) > ((n) - (o))) #define MAX_RECURSION_LEVEL 10 /* * softmagic - lookup one file in parsed, in-memory copy of database * Passed the name and FILE * of one file to be typed. 
*/ /*ARGSUSED1*/ /* nbytes passed for regularity, maybe need later */ protected int file_softmagic(struct magic_set *ms, const unsigned char *buf, size_t nbytes, size_t level, int mode, int text) { struct mlist *ml; int rv, printed_something = 0, need_separator = 0; for (ml = ms->mlist[0]->next; ml != ms->mlist[0]; ml = ml->next) if ((rv = match(ms, ml->magic, ml->nmagic, buf, nbytes, 0, mode, text, 0, level, &printed_something, &need_separator, NULL)) != 0) return rv; return 0; } #define FILE_FMTDEBUG #ifdef FILE_FMTDEBUG #define F(a, b, c) file_fmtcheck((a), (b), (c), __FILE__, __LINE__) private const char * __attribute__((__format_arg__(3))) file_fmtcheck(struct magic_set *ms, const struct magic *m, const char *def, const char *file, size_t line) { const char *ptr = fmtcheck(m->desc, def); if (ptr == def) file_magerror(ms, "%s, %" SIZE_T_FORMAT "u: format `%s' does not match" " with `%s'", file, line, m->desc, def); return ptr; } #else #define F(a, b, c) fmtcheck((b)->desc, (c)) #endif /* * Go through the whole list, stopping if you find a match. Process all * the continuations of that match before returning. * * We support multi-level continuations: * * At any time when processing a successful top-level match, there is a * current continuation level; it represents the level of the last * successfully matched continuation. * * Continuations above that level are skipped as, if we see one, it * means that the continuation that controls them - i.e, the * lower-level continuation preceding them - failed to match. * * Continuations below that level are processed as, if we see one, * it means we've finished processing or skipping higher-level * continuations under the control of a successful or unsuccessful * lower-level continuation, and are now seeing the next lower-level * continuation and should process it. The current continuation * level reverts to the level of the one we're seeing. * * Continuations at the current level are processed as, if we see * one, there's no lower-level continuation that may have failed. * * If a continuation matches, we bump the current continuation level * so that higher-level continuations are processed. */ private int match(struct magic_set *ms, struct magic *magic, uint32_t nmagic, const unsigned char *s, size_t nbytes, size_t offset, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t magindex = 0; unsigned int cont_level = 0; int returnvalv = 0, e; /* if a match is found it is set to 1*/ int firstline = 1; /* a flag to print X\n X\n- X */ int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0; if (returnval == NULL) returnval = &returnvalv; if (file_check_mem(ms, cont_level) == -1) return -1; for (magindex = 0; magindex < nmagic; magindex++) { int flush = 0; struct magic *m = &magic[magindex]; if (m->type != FILE_NAME) if ((IS_STRING(m->type) && #define FLT (STRING_BINTEST | STRING_TEXTTEST) ((text && (m->str_flags & FLT) == STRING_BINTEST) || (!text && (m->str_flags & FLT) == STRING_TEXTTEST))) || (m->flag & mode) != mode) { /* Skip sub-tests */ while (magindex + 1 < nmagic && magic[magindex + 1].cont_level != 0 && ++magindex) continue; continue; /* Skip to next top-level test*/ } ms->offset = m->offset; ms->line = m->lineno; /* if main entry matches, print it... 
*/ switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: flush = m->reln != '!'; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; switch (magiccheck(ms, m)) { case -1: return -1; case 0: flush++; break; default: flush = 0; break; } break; } if (flush) { /* * main entry didn't match, * flush its continuations */ while (magindex < nmagic - 1 && magic[magindex + 1].cont_level != 0) magindex++; continue; } if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, we'll need to print * a blank before we print something else. */ if (*m->desc) { *need_separator = 1; *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); /* and any continuations that match */ if (file_check_mem(ms, ++cont_level) == -1) return -1; while (magindex + 1 < nmagic && magic[magindex + 1].cont_level != 0) { m = &magic[++magindex]; ms->line = m->lineno; /* for messages */ if (cont_level < m->cont_level) continue; if (cont_level > m->cont_level) { /* * We're at the end of the level * "cont_level" continuations. */ cont_level = m->cont_level; } ms->offset = m->offset; if (m->flag & OFFADD) { ms->offset += ms->c.li[cont_level - 1].off; } #ifdef ENABLE_CONDITIONALS if (m->cond == COND_ELSE || m->cond == COND_ELIF) { if (ms->c.li[cont_level].last_match == 1) continue; } #endif switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: if (m->reln != '!') continue; flush = 1; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; flush = 0; break; } switch (flush ? 1 : magiccheck(ms, m)) { case -1: return -1; case 0: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 0; #endif break; default: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 1; #endif if (m->type == FILE_CLEAR) ms->c.li[cont_level].got_match = 0; else if (ms->c.li[cont_level].got_match) { if (m->type == FILE_DEFAULT) break; } else ms->c.li[cont_level].got_match = 1; if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, * make sure that we have a separator first. */ if (*m->desc) { if (!*printed_something) { *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } } /* * This continuation matched. Print * its message, with a blank before it * if the previous item printed and * this item isn't empty. */ /* space if previous printed */ if (*need_separator && ((m->flag & NOSPACE) == 0) && *m->desc) { if (print && file_printf(ms, " ") == -1) return -1; *need_separator = 0; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); if (*m->desc) *need_separator = 1; /* * If we see any continuations * at a higher level, * process them. 
*/ if (file_check_mem(ms, ++cont_level) == -1) return -1; break; } } if (*printed_something) { firstline = 0; if (print) *returnval = 1; } if ((ms->flags & MAGIC_CONTINUE) == 0 && *printed_something) { return *returnval; /* don't keep searching */ } } return *returnval; /* This is hit if -k is set or there is no match */ } private int check_fmt(struct magic_set *ms, struct magic *m) { file_regex_t rx; int rc, rv = -1; if (strchr(m->desc, '%') == NULL) return 0; rc = file_regcomp(&rx, "%[-0-9\\.]*s", REG_EXTENDED|REG_NOSUB); if (rc) { file_regerror(&rx, rc, ms); } else { rc = file_regexec(&rx, m->desc, 0, 0, 0); rv = !rc; } file_regfree(&rx); return rv; } #ifndef HAVE_STRNDUP char * strndup(const char *, size_t); char * strndup(const char *str, size_t n) { size_t len; char *copy; for (len = 0; len < n && str[len]; len++) continue; if ((copy = malloc(len + 1)) == NULL) return NULL; (void)memcpy(copy, str, len); copy[len] = '\0'; return copy; } #endif /* HAVE_STRNDUP */ static char * printable(char *buf, size_t bufsiz, const char *str) { char *ptr, *eptr; const unsigned char *s = (const unsigned char *)str; for (ptr = buf, eptr = ptr + bufsiz - 1; ptr < eptr && *s; s++) { if (isprint(*s)) { *ptr++ = *s; continue; } if (ptr >= eptr + 4) break; *ptr++ = '\\'; *ptr++ = ((*s >> 6) & 7) + '0'; *ptr++ = ((*s >> 3) & 7) + '0'; *ptr++ = ((*s >> 0) & 7) + '0'; } *ptr = '\0'; return buf; } private int32_t mprint(struct magic_set *ms, struct magic *m) { uint64_t v; float vf; double vd; int64_t t = 0; char buf[128], tbuf[26]; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = file_signextend(ms, m, (uint64_t)p->b); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%d", (unsigned char)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%d"), (unsigned char) v) == -1) return -1; break; } t = ms->offset + sizeof(char); break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = file_signextend(ms, m, (uint64_t)p->h); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (unsigned short)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (unsigned short) v) == -1) return -1; break; } t = ms->offset + sizeof(short); break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: v = file_signextend(ms, m, (uint64_t)p->l); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (uint32_t) v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (uint32_t) v) == -1) return -1; break; } t = ms->offset + sizeof(int32_t); break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: v = file_signextend(ms, m, p->q); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%" INT64_T_FORMAT "u", (unsigned long long)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%" INT64_T_FORMAT "u"), (unsigned long long) v) == -1) return -1; break; } t = ms->offset + sizeof(int64_t); break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') { if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; t = ms->offset + m->vallen; } else { char sbuf[512]; char *str = p->s; /* compute t before we mangle the string? 
*/ t = ms->offset + strlen(str); if (*m->value.s == '\0') str[strcspn(str, "\n")] = '\0'; if (m->str_flags & STRING_TRIM) { char *last; while (isspace((unsigned char)*str)) str++; last = str; while (*last) last++; --last; while (isspace((unsigned char)*last)) last--; *++last = '\0'; } if (file_printf(ms, F(ms, m, "%s"), printable(sbuf, sizeof(sbuf), str)) == -1) return -1; if (m->type == FILE_PSTRING) t += file_pstring_length_size(m); } break; case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l + m->num_mask, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l + m->num_mask, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q + m->num_mask, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q + m->num_mask, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q + m->num_mask, FILE_T_WINDOWS, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: vf = p->f; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vf); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vf) == -1) return -1; break; } t = ms->offset + sizeof(float); break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: vd = p->d; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vd); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vd) == -1) return -1; break; } t = ms->offset + sizeof(double); break; case FILE_REGEX: { char *cp; int rval; cp = strndup((const char *)ms->search.s, ms->search.rm_len); if (cp == NULL) { file_oomem(ms, ms->search.rm_len); return -1; } rval = file_printf(ms, F(ms, m, "%s"), cp); free(cp); if (rval == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + ms->search.rm_len; break; } case FILE_SEARCH: if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + m->vallen; break; case FILE_DEFAULT: case FILE_CLEAR: if (file_printf(ms, "%s", m->desc) == -1) return -1; t = ms->offset; break; case FILE_INDIRECT: case FILE_USE: case FILE_NAME: t = ms->offset; break; default: file_magerror(ms, "invalid m->type (%d) in mprint()", m->type); return -1; } return (int32_t)t; } private int32_t moffset(struct magic_set *ms, struct magic *m) { switch (m->type) { case FILE_BYTE: return CAST(int32_t, (ms->offset + sizeof(char))); case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: return CAST(int32_t, (ms->offset + sizeof(short))); case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: return CAST(int32_t, (ms->offset + sizeof(int32_t))); case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: return CAST(int32_t, (ms->offset + sizeof(int64_t))); case FILE_STRING: 
case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') return ms->offset + m->vallen; else { union VALUETYPE *p = &ms->ms_value; uint32_t t; if (*m->value.s == '\0') p->s[strcspn(p->s, "\n")] = '\0'; t = CAST(uint32_t, (ms->offset + strlen(p->s))); if (m->type == FILE_PSTRING) t += (uint32_t)file_pstring_length_size(m); return t; } case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: return CAST(int32_t, (ms->offset + sizeof(float))); case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: return CAST(int32_t, (ms->offset + sizeof(double))); case FILE_REGEX: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + ms->search.rm_len)); case FILE_SEARCH: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + m->vallen)); case FILE_CLEAR: case FILE_DEFAULT: case FILE_INDIRECT: return ms->offset; default: return 0; } } private int cvt_flip(int type, int flip) { if (flip == 0) return type; switch (type) { case FILE_BESHORT: return FILE_LESHORT; case FILE_BELONG: return FILE_LELONG; case FILE_BEDATE: return FILE_LEDATE; case FILE_BELDATE: return FILE_LELDATE; case FILE_BEQUAD: return FILE_LEQUAD; case FILE_BEQDATE: return FILE_LEQDATE; case FILE_BEQLDATE: return FILE_LEQLDATE; case FILE_BEQWDATE: return FILE_LEQWDATE; case FILE_LESHORT: return FILE_BESHORT; case FILE_LELONG: return FILE_BELONG; case FILE_LEDATE: return FILE_BEDATE; case FILE_LELDATE: return FILE_BELDATE; case FILE_LEQUAD: return FILE_BEQUAD; case FILE_LEQDATE: return FILE_BEQDATE; case FILE_LEQLDATE: return FILE_BEQLDATE; case FILE_LEQWDATE: return FILE_BEQWDATE; case FILE_BEFLOAT: return FILE_LEFLOAT; case FILE_LEFLOAT: return FILE_BEFLOAT; case FILE_BEDOUBLE: return FILE_LEDOUBLE; case FILE_LEDOUBLE: return FILE_BEDOUBLE; default: return type; } } #define DO_CVT(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPAND: \ p->fld &= cast m->num_mask; \ break; \ case FILE_OPOR: \ p->fld |= cast m->num_mask; \ break; \ case FILE_OPXOR: \ p->fld ^= cast m->num_mask; \ break; \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ case FILE_OPMODULO: \ p->fld %= cast m->num_mask; \ break; \ } \ if (m->mask_op & FILE_OPINVERSE) \ p->fld = ~p->fld \ private void cvt_8(union VALUETYPE *p, const struct magic *m) { DO_CVT(b, (uint8_t)); } private void cvt_16(union VALUETYPE *p, const struct magic *m) { DO_CVT(h, (uint16_t)); } private void cvt_32(union VALUETYPE *p, const struct magic *m) { DO_CVT(l, (uint32_t)); } private void cvt_64(union VALUETYPE *p, const struct magic *m) { DO_CVT(q, (uint64_t)); } #define DO_CVT2(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case 
FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ } \ private void cvt_float(union VALUETYPE *p, const struct magic *m) { DO_CVT2(f, (float)); } private void cvt_double(union VALUETYPE *p, const struct magic *m) { DO_CVT2(d, (double)); } /* * Convert the byte order of the data we are looking at * While we're here, let's apply the mask operation * (unless you have a better idea) */ private int mconvert(struct magic_set *ms, struct magic *m, int flip) { union VALUETYPE *p = &ms->ms_value; uint8_t type; switch (type = cvt_flip(m->type, flip)) { case FILE_BYTE: cvt_8(p, m); return 1; case FILE_SHORT: cvt_16(p, m); return 1; case FILE_LONG: case FILE_DATE: case FILE_LDATE: cvt_32(p, m); return 1; case FILE_QUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: cvt_64(p, m); return 1; case FILE_STRING: case FILE_BESTRING16: case FILE_LESTRING16: { /* Null terminate and eat *trailing* return */ p->s[sizeof(p->s) - 1] = '\0'; return 1; } case FILE_PSTRING: { size_t sz = file_pstring_length_size(m); char *ptr1 = p->s, *ptr2 = ptr1 + sz; size_t len = file_pstring_get_length(m, ptr1); sz = sizeof(p->s) - sz; /* maximum length of string */ if (len >= sz) { /* * The size of the pascal string length (sz) * is 1, 2, or 4. We need at least 1 byte for NUL * termination, but we've already truncated the * string by p->s, so we need to deduct sz. * Because we can use one of the bytes of the length * after we shifted as NUL termination. */ len = sz; } while (len--) *ptr1++ = *ptr2++; *ptr1 = '\0'; return 1; } case FILE_BESHORT: p->h = (short)((p->hs[0]<<8)|(p->hs[1])); cvt_16(p, m); return 1; case FILE_BELONG: case FILE_BEDATE: case FILE_BELDATE: p->l = (int32_t) ((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3])); if (type == FILE_BELONG) cvt_32(p, m); return 1; case FILE_BEQUAD: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7])); if (type == FILE_BEQUAD) cvt_64(p, m); return 1; case FILE_LESHORT: p->h = (short)((p->hs[1]<<8)|(p->hs[0])); cvt_16(p, m); return 1; case FILE_LELONG: case FILE_LEDATE: case FILE_LELDATE: p->l = (int32_t) ((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0])); if (type == FILE_LELONG) cvt_32(p, m); return 1; case FILE_LEQUAD: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0])); if (type == FILE_LEQUAD) cvt_64(p, m); return 1; case FILE_MELONG: case FILE_MEDATE: case FILE_MELDATE: p->l = (int32_t) ((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2])); if (type == FILE_MELONG) cvt_32(p, m); return 1; case FILE_FLOAT: cvt_float(p, m); return 1; case FILE_BEFLOAT: p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)| ((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]); cvt_float(p, m); return 1; case FILE_LEFLOAT: p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)| ((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]); cvt_float(p, m); return 1; case FILE_DOUBLE: cvt_double(p, m); return 1; case FILE_BEDOUBLE: p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| 
((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]); cvt_double(p, m); return 1; case FILE_LEDOUBLE: p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]); cvt_double(p, m); return 1; case FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_CLEAR: case FILE_NAME: case FILE_USE: return 1; default: file_magerror(ms, "invalid type %d in mconvert()", m->type); return 0; } } private void mdebug(uint32_t offset, const char *str, size_t len) { (void) fprintf(stderr, "mget/%" SIZE_T_FORMAT "u @%d: ", len, offset); file_showstr(stderr, str, len); (void) fputc('\n', stderr); (void) fputc('\n', stderr); } private int mcopy(struct magic_set *ms, union VALUETYPE *p, int type, int indir, const unsigned char *s, uint32_t offset, size_t nbytes, struct magic *m) { /* * Note: FILE_SEARCH and FILE_REGEX do not actually copy * anything, but setup pointers into the source */ if (indir == 0) { switch (type) { case FILE_SEARCH: ms->search.s = RCAST(const char *, s) + offset; ms->search.s_len = nbytes - offset; ms->search.offset = offset; return 0; case FILE_REGEX: { const char *b; const char *c; const char *last; /* end of search region */ const char *buf; /* start of search region */ const char *end; size_t lines, linecnt, bytecnt; if (s == NULL) { ms->search.s_len = 0; ms->search.s = NULL; return 0; } if (m->str_flags & REGEX_LINE_COUNT) { linecnt = m->str_range; bytecnt = linecnt * 80; } else { linecnt = 0; bytecnt = m->str_range; } if (bytecnt == 0) bytecnt = 8192; if (bytecnt > nbytes) bytecnt = nbytes; buf = RCAST(const char *, s) + offset; end = last = RCAST(const char *, s) + bytecnt; /* mget() guarantees buf <= last */ for (lines = linecnt, b = buf; lines && b < end && ((b = CAST(const char *, memchr(c = b, '\n', CAST(size_t, (end - b))))) || (b = CAST(const char *, memchr(c, '\r', CAST(size_t, (end - c)))))); lines--, b++) { last = b; if (b[0] == '\r' && b[1] == '\n') b++; } if (lines) last = RCAST(const char *, s) + bytecnt; ms->search.s = buf; ms->search.s_len = last - buf; ms->search.offset = offset; ms->search.rm_len = 0; return 0; } case FILE_BESTRING16: case FILE_LESTRING16: { const unsigned char *src = s + offset; const unsigned char *esrc = s + nbytes; char *dst = p->s; char *edst = &p->s[sizeof(p->s) - 1]; if (type == FILE_BESTRING16) src++; /* check that offset is within range */ if (offset >= nbytes) break; for (/*EMPTY*/; src < esrc; src += 2, dst++) { if (dst < edst) *dst = *src; else break; if (*dst == '\0') { if (type == FILE_BESTRING16 ? *(src - 1) != '\0' : *(src + 1) != '\0') *dst = ' '; } } *edst = '\0'; return 0; } case FILE_STRING: /* XXX - these two should not need */ case FILE_PSTRING: /* to copy anything, but do anyway. 
*/ default: break; } } if (offset >= nbytes) { (void)memset(p, '\0', sizeof(*p)); return 0; } if (nbytes - offset < sizeof(*p)) nbytes = nbytes - offset; else nbytes = sizeof(*p); (void)memcpy(p, s + offset, nbytes); /* * the usefulness of padding with zeroes eludes me, it * might even cause problems */ if (nbytes < sizeof(*p)) (void)memset(((char *)(void *)p) + nbytes, '\0', sizeof(*p) - nbytes); return 0; } private int mget(struct magic_set *ms, const unsigned char *s, struct magic *m, size_t nbytes, size_t o, unsigned int cont_level, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t offset = ms->offset; uint32_t lhs; file_pushbuf_t *pb; int rv, oneed_separator, in_type; char *rbuf; union VALUETYPE *p = &ms->ms_value; struct mlist ml; if (recursion_level >= MAX_RECURSION_LEVEL) { file_error(ms, 0, "recursion nesting exceeded"); return -1; } if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o), (uint32_t)nbytes, m) == -1) return -1; if ((ms->flags & MAGIC_DEBUG) != 0) { fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%" SIZE_T_FORMAT "u, " "nbytes=%" SIZE_T_FORMAT "u)\n", m->type, m->flag, offset, o, nbytes); mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } if (m->flag & INDIR) { int off = m->in_offset; if (m->in_op & FILE_OPINDIRECT) { const union VALUETYPE *q = CAST(const union VALUETYPE *, ((const void *)(s + offset + off))); switch (cvt_flip(m->in_type, flip)) { case FILE_BYTE: off = q->b; break; case FILE_SHORT: off = q->h; break; case FILE_BESHORT: off = (short)((q->hs[0]<<8)|(q->hs[1])); break; case FILE_LESHORT: off = (short)((q->hs[1]<<8)|(q->hs[0])); break; case FILE_LONG: off = q->l; break; case FILE_BELONG: case FILE_BEID3: off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)| (q->hl[2]<<8)|(q->hl[3])); break; case FILE_LEID3: case FILE_LELONG: off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)| (q->hl[1]<<8)|(q->hl[0])); break; case FILE_MELONG: off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)| (q->hl[3]<<8)|(q->hl[2])); break; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect offs=%u\n", off); } switch (in_type = cvt_flip(m->in_type, flip)) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->b & off; break; case FILE_OPOR: offset = p->b | off; break; case FILE_OPXOR: offset = p->b ^ off; break; case FILE_OPADD: offset = p->b + off; break; case FILE_OPMINUS: offset = p->b - off; break; case FILE_OPMULTIPLY: offset = p->b * off; break; case FILE_OPDIVIDE: offset = p->b / off; break; case FILE_OPMODULO: offset = p->b % off; break; } } else offset = p->b; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[0] << 8) | p->hs[1]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[1] << 8) | p->hs[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: 
offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_SHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->h & off; break; case FILE_OPOR: offset = p->h | off; break; case FILE_OPXOR: offset = p->h ^ off; break; case FILE_OPADD: offset = p->h + off; break; case FILE_OPMINUS: offset = p->h - off; break; case FILE_OPMULTIPLY: offset = p->h * off; break; case FILE_OPDIVIDE: offset = p->h / off; break; case FILE_OPMODULO: offset = p->h % off; break; } } else offset = p->h; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BELONG: case FILE_BEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[0] << 24) | (p->hl[1] << 16) | (p->hl[2] << 8) | p->hl[3]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LELONG: case FILE_LEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[3] << 24) | (p->hl[2] << 16) | (p->hl[1] << 8) | p->hl[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_MELONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[1] << 24) | (p->hl[0] << 16) | (p->hl[3] << 8) | p->hl[2]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->l & off; break; case FILE_OPOR: offset = p->l | off; break; case FILE_OPXOR: offset = p->l ^ off; break; case FILE_OPADD: offset = p->l + off; break; case FILE_OPMINUS: offset = p->l - off; break; case FILE_OPMULTIPLY: offset = p->l * off; break; case FILE_OPDIVIDE: offset = p->l / off; break; case FILE_OPMODULO: offset = p->l % off; break; } } else offset = p->l; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; default: break; } switch (in_type) { case FILE_LEID3: case FILE_BEID3: offset = ((((offset >> 0) & 
0x7f) << 0) | (((offset >> 8) & 0x7f) << 7) | (((offset >> 16) & 0x7f) << 14) | (((offset >> 24) & 0x7f) << 21)) + 10; break; default: break; } if (m->flag & INDIROFFADD) { offset += ms->c.li[cont_level-1].off; if (offset == 0) { if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect *zero* offset\n"); return 0; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect +offs=%u\n", offset); } if (mcopy(ms, p, m->type, 0, s, offset, nbytes, m) == -1) return -1; ms->offset = offset; if ((ms->flags & MAGIC_DEBUG) != 0) { mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } } /* Verify we have enough data to match magic type */ switch (m->type) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: if (OFFSET_OOB(nbytes, offset, 4)) return 0; break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: if (OFFSET_OOB(nbytes, offset, 8)) return 0; break; case FILE_STRING: case FILE_PSTRING: case FILE_SEARCH: if (OFFSET_OOB(nbytes, offset, m->vallen)) return 0; break; case FILE_REGEX: if (nbytes < offset) return 0; break; case FILE_INDIRECT: if (offset == 0) return 0; if (nbytes < offset) return 0; if ((pb = file_push_buffer(ms)) == NULL) return -1; rv = file_softmagic(ms, s + offset, nbytes - offset, recursion_level, BINTEST, text); if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv); rbuf = file_pop_buffer(ms, pb); if (rbuf == NULL) return -1; if (rv == 1) { if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 && file_printf(ms, F(ms, m, "%u"), offset) == -1) { free(rbuf); return -1; } if (file_printf(ms, "%s", rbuf) == -1) { free(rbuf); return -1; } } free(rbuf); return rv; case FILE_USE: if (nbytes < offset) return 0; rbuf = m->value.s; if (*rbuf == '^') { rbuf++; flip = !flip; } if (file_magicfind(ms, rbuf, &ml) == -1) { file_error(ms, 0, "cannot find entry `%s'", rbuf); return -1; } oneed_separator = *need_separator; if (m->flag & NOSPACE) *need_separator = 0; rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o, mode, text, flip, recursion_level, printed_something, need_separator, returnval); if (rv != 1) *need_separator = oneed_separator; return rv; case FILE_NAME: if (file_printf(ms, "%s", m->desc) == -1) return -1; return 1; case FILE_DEFAULT: /* nothing to check */ case FILE_CLEAR: default: break; } if (!mconvert(ms, m, flip)) return 0; return 1; } private uint64_t file_strncmp(const char *s1, const char *s2, size_t len, uint32_t flags) { /* * Convert the source args to unsigned here so that (1) the * compare will be unsigned as it is in strncmp() and (2) so * the ctype functions will work correctly without extra * casting. */ const unsigned char *a = (const unsigned char *)s1; const unsigned char *b = (const unsigned char *)s2; uint64_t v; /* * What we want here is v = strncmp(s1, s2, len), * but ignoring any nulls. 
*/ v = 0; if (0L == flags) { /* normal string: do it fast */ while (len-- > 0) if ((v = *b++ - *a++) != '\0') break; } else { /* combine the others */ while (len-- > 0) { if ((flags & STRING_IGNORE_LOWERCASE) && islower(*a)) { if ((v = tolower(*b++) - *a++) != '\0') break; } else if ((flags & STRING_IGNORE_UPPERCASE) && isupper(*a)) { if ((v = toupper(*b++) - *a++) != '\0') break; } else if ((flags & STRING_COMPACT_WHITESPACE) && isspace(*a)) { a++; if (isspace(*b++)) { if (!isspace(*a)) while (isspace(*b)) b++; } else { v = 1; break; } } else if ((flags & STRING_COMPACT_OPTIONAL_WHITESPACE) && isspace(*a)) { a++; while (isspace(*b)) b++; } else { if ((v = *b++ - *a++) != '\0') break; } } } return v; } private uint64_t file_strncmp16(const char *a, const char *b, size_t len, uint32_t flags) { /* * XXX - The 16-bit string compare probably needs to be done * differently, especially if the flags are to be supported. * At the moment, I am unsure. */ flags = 0; return file_strncmp(a, b, len, flags); } private int magiccheck(struct magic_set *ms, struct magic *m) { uint64_t l = m->value.q; uint64_t v; float fl, fv; double dl, dv; int matched; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = p->b; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = p->h; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: v = p->l; break; case FILE_QUAD: case FILE_LEQUAD: case FILE_BEQUAD: case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: v = p->q; break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: fl = m->value.f; fv = p->f; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = fv != fl; break; case '=': matched = fv == fl; break; case '>': matched = fv > fl; break; case '<': matched = fv < fl; break; default: file_magerror(ms, "cannot happen with float: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: dl = m->value.d; dv = p->d; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = dv != dl; break; case '=': matched = dv == dl; break; case '>': matched = dv > dl; break; case '<': matched = dv < dl; break; default: file_magerror(ms, "cannot happen with double: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DEFAULT: case FILE_CLEAR: l = 0; v = 0; break; case FILE_STRING: case FILE_PSTRING: l = 0; v = file_strncmp(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_BESTRING16: case FILE_LESTRING16: l = 0; v = file_strncmp16(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */ size_t slen; size_t idx; if (ms->search.s == NULL) return 0; slen = MIN(m->vallen, sizeof(m->value.s)); l = 0; v = 0; for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) { if (slen + idx > ms->search.s_len) break; v = file_strncmp(m->value.s, ms->search.s + idx, slen, m->str_flags); if (v == 0) { /* found match */ ms->search.offset += idx; break; } } break; } case FILE_REGEX: { int rc; file_regex_t rx; const char *search; if (ms->search.s == NULL) return 0; l = 0; rc = file_regcomp(&rx, m->value.s, REG_EXTENDED|REG_NEWLINE| ((m->str_flags & STRING_IGNORE_CASE) ? 
REG_ICASE : 0)); if (rc) { file_regerror(&rx, rc, ms); v = (uint64_t)-1; } else { regmatch_t pmatch[1]; size_t slen = ms->search.s_len; #ifndef REG_STARTEND #define REG_STARTEND 0 char *copy; if (slen != 0) { copy = malloc(slen); if (copy == NULL) { file_error(ms, errno, "can't allocate %" SIZE_T_FORMAT "u bytes", slen); return -1; } memcpy(copy, ms->search.s, slen); copy[--slen] = '\0'; search = copy; } else { search = ms->search.s; copy = NULL; } #else search = ms->search.s; pmatch[0].rm_so = 0; pmatch[0].rm_eo = slen; #endif rc = file_regexec(&rx, (const char *)search, 1, pmatch, REG_STARTEND); #if REG_STARTEND == 0 free(copy); #endif switch (rc) { case 0: ms->search.s += (int)pmatch[0].rm_so; ms->search.offset += (size_t)pmatch[0].rm_so; ms->search.rm_len = (size_t)(pmatch[0].rm_eo - pmatch[0].rm_so); v = 0; break; case REG_NOMATCH: v = 1; break; default: file_regerror(&rx, rc, ms); v = (uint64_t)-1; break; } } file_regfree(&rx); if (v == (uint64_t)-1) return -1; break; } case FILE_INDIRECT: case FILE_USE: case FILE_NAME: return 1; default: file_magerror(ms, "invalid type %d in magiccheck()", m->type); return -1; } v = file_signextend(ms, m, v); switch (m->reln) { case 'x': if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == *any* = 1\n", (unsigned long long)v); matched = 1; break; case '!': matched = v != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u != %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '=': matched = v == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '>': if (m->flag & UNSIGNED) { matched = v > l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u > %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v > (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d > %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '<': if (m->flag & UNSIGNED) { matched = v < l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u < %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v < (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d < %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '&': matched = (v & l) == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) == %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; case '^': matched = (v & l) != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) != %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; default: file_magerror(ms, "cannot happen: invalid relation `%c'", m->reln); return -1; } return matched; } private int handle_annotation(struct magic_set *ms, struct magic *m) { if (ms->flags & MAGIC_APPLE) { if (file_printf(ms, "%.8s", m->apple) == -1) return -1; return 1; } if ((ms->flags & MAGIC_MIME_TYPE) && m->mimetype[0]) { if (file_printf(ms, "%s", m->mimetype) == -1) return -1; return 1; } return 0; } private int 
print_sep(struct magic_set *ms, int firstline)
{
	if (ms->flags & MAGIC_MIME)
		return 0;
	if (firstline)
		return 0;
	/*
	 * we found another match
	 * put a newline and '-' to do some simple formatting
	 */
	return file_printf(ms, "\n- ");
}
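/*
 * Illustrative sketch, not part of softmagic.c: in practice the matcher
 * above is reached through libmagic's public API.  magic_file() reads the
 * target and ends up in file_softmagic(), which walks the compiled magic
 * entries via match()/mget()/magiccheck() as implemented in this file.
 * The path argument is an arbitrary example; link a real program with
 * -lmagic.
 */
#if 0	/* example only -- not compiled as part of this file */
#include <magic.h>
#include <stdio.h>

int describe_file(const char *path)
{
	magic_t cookie = magic_open(MAGIC_NONE);
	const char *desc;

	if (cookie == NULL)
		return -1;
	if (magic_load(cookie, NULL) != 0) {	/* NULL = default database */
		fprintf(stderr, "magic_load: %s\n", magic_error(cookie));
		magic_close(cookie);
		return -1;
	}
	desc = magic_file(cookie, path);
	printf("%s: %s\n", path, desc ? desc : magic_error(cookie));
	magic_close(cookie);
	return 0;
}
#endif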
./CrossVul/dataset_final_sorted/CWE-399/c/good_2297_2
crossvul-cpp_data_bad_4984_4
/* * sysctl.c: General linux system control interface * * Begun 24 March 1995, Stephen Tweedie * Added /proc support, Dec 1995 * Added bdflush entry and intvec min/max checking, 2/23/96, Tom Dyas. * Added hooks for /proc/sys/net (minor, minor patch), 96/4/1, Mike Shaver. * Added kernel/java-{interpreter,appletviewer}, 96/5/10, Mike Shaver. * Dynamic registration fixes, Stephen Tweedie. * Added kswapd-interval, ctrl-alt-del, printk stuff, 1/8/97, Chris Horn. * Made sysctl support optional via CONFIG_SYSCTL, 1/10/97, Chris * Horn. * Added proc_doulongvec_ms_jiffies_minmax, 09/08/99, Carlos H. Bauer. * Added proc_doulongvec_minmax, 09/08/99, Carlos H. Bauer. * Changed linked lists to use list.h instead of lists.h, 02/24/00, Bill * Wendling. * The list_for_each() macro wasn't appropriate for the sysctl loop. * Removed it and replaced it with older style, 03/23/00, Bill Wendling */ #include <linux/module.h> #include <linux/aio.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/bitmap.h> #include <linux/signal.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/ctype.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kobject.h> #include <linux/net.h> #include <linux/sysrq.h> #include <linux/highuid.h> #include <linux/writeback.h> #include <linux/ratelimit.h> #include <linux/compaction.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/key.h> #include <linux/times.h> #include <linux/limits.h> #include <linux/dcache.h> #include <linux/dnotify.h> #include <linux/syscalls.h> #include <linux/vmstat.h> #include <linux/nfs_fs.h> #include <linux/acpi.h> #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/kmod.h> #include <linux/capability.h> #include <linux/binfmts.h> #include <linux/sched/sysctl.h> #include <linux/kexec.h> #include <linux/bpf.h> #include <asm/uaccess.h> #include <asm/processor.h> #ifdef CONFIG_X86 #include <asm/nmi.h> #include <asm/stacktrace.h> #include <asm/io.h> #endif #ifdef CONFIG_SPARC #include <asm/setup.h> #endif #ifdef CONFIG_BSD_PROCESS_ACCT #include <linux/acct.h> #endif #ifdef CONFIG_RT_MUTEXES #include <linux/rtmutex.h> #endif #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include <linux/lockdep.h> #endif #ifdef CONFIG_CHR_DEV_SG #include <scsi/sg.h> #endif #ifdef CONFIG_LOCKUP_DETECTOR #include <linux/nmi.h> #endif #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ extern int suid_dumpable; #ifdef CONFIG_COREDUMP extern int core_uses_pid; extern char core_pattern[]; extern unsigned int core_pipe_limit; #endif extern int pid_max; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; extern int compat_log; extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; #ifndef CONFIG_MMU extern int sysctl_nr_trim_pages; #endif /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; #endif static int __maybe_unused neg_one = -1; static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused four = 4; static unsigned long one_ul = 1; static int one_hundred = 100; #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ #ifdef CONFIG_DETECT_HUNG_TASK static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); #endif #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif #ifdef CONFIG_SPARC #endif #ifdef __hppa__ extern int pwrsw_enabled; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW extern int unaligned_enabled; #endif #ifdef CONFIG_IA64 extern int unaligned_dump_stack; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN extern int no_unaligned_warning; #endif #ifdef CONFIG_PROC_SYSCTL #define SYSCTL_WRITES_LEGACY -1 #define SYSCTL_WRITES_WARN 0 #define SYSCTL_WRITES_STRICT 1 static int sysctl_writes_strict = SYSCTL_WRITES_WARN; static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses it's own private copy */ static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static int sysrq_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error; error = proc_dointvec(table, write, buffer, lenp, ppos); if (error) return error; if (write) sysrq_toggle_support(__sysrq_enabled); return 0; } #endif static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; static struct ctl_table debug_table[]; static struct ctl_table dev_table[]; extern struct ctl_table random_table[]; #ifdef CONFIG_EPOLL extern struct ctl_table epoll_table[]; #endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; #endif /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { { .procname = "kernel", .mode = 0555, .child = kern_table, }, { .procname = "vm", .mode = 0555, .child = vm_table, }, { .procname = "fs", .mode = 0555, .child = fs_table, }, { .procname 
= "debug", .mode = 0555, .child = debug_table, }, { .procname = "dev", .mode = 0555, .child = dev_table, }, { } }; #ifdef CONFIG_SCHED_DEBUG static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ #ifdef CONFIG_SMP static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif /* CONFIG_SMP */ #endif /* CONFIG_SCHED_DEBUG */ #ifdef CONFIG_COMPACTION static int min_extfrag_threshold; static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", .data = &sysctl_sched_min_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_latency_ns", .data = &sysctl_sched_latency, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_wakeup_granularity_ns", .data = &sysctl_sched_wakeup_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", .data = &sysctl_sched_tunable_scaling, .maxlen = sizeof(enum sched_tunable_scaling), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_tunable_scaling, .extra2 = &max_sched_tunable_scaling, }, { .procname = "sched_migration_cost_ns", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_time_avg_ms", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_shares_window_ns", .data = &sysctl_sched_shares_window, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif /* CONFIG_SMP */ #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing_scan_delay_ms", .data = &sysctl_numa_balancing_scan_delay, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_min_ms", .data = &sysctl_numa_balancing_scan_period_min, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_max_ms", .data = &sysctl_numa_balancing_scan_period_max, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_size_mb", .data = &sysctl_numa_balancing_scan_size, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, { .procname = "numa_balancing", .data = NULL, /* filled in by handler */ .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sysctl_numa_balancing, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_NUMA_BALANCING */ 
#endif /* CONFIG_SCHED_DEBUG */ { .procname = "sched_rt_period_us", .data = &sysctl_sched_rt_period, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rt_runtime_us", .data = &sysctl_sched_rt_runtime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rr_timeslice_ms", .data = &sched_rr_timeslice, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rr_handler, }, #ifdef CONFIG_SCHED_AUTOGROUP { .procname = "sched_autogroup_enabled", .data = &sysctl_sched_autogroup_enabled, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", .data = &sysctl_sched_cfs_bandwidth_slice, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, #endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", .data = &prove_locking, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_LOCK_STAT { .procname = "lock_stat", .data = &lock_stat, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic", .data = &panic_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_COREDUMP { .procname = "core_uses_pid", .data = &core_uses_pid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "core_pattern", .data = core_pattern, .maxlen = CORENAME_MAX_SIZE, .mode = 0644, .proc_handler = proc_dostring_coredump, }, { .procname = "core_pipe_limit", .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_taint, }, { .procname = "sysctl_writes_strict", .data = &sysctl_writes_strict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, .extra2 = &one, }, #endif #ifdef CONFIG_LATENCYTOP { .procname = "latencytop", .data = &latencytop_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLK_DEV_INITRD { .procname = "real-root-dev", .data = &real_root_dev, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "print-fatal-signals", .data = &print_fatal_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SPARC { .procname = "reboot-cmd", .data = reboot_command, .maxlen = 256, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "stop-a", .data = &stop_a_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "scons-poweroff", .data = &scons_pwroff, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SPARC64 { .procname = "tsb-ratio", .data = &sysctl_tsb_ratio, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef __hppa__ { .procname = "soft-power", .data = &pwrsw_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW { .procname = "unaligned-trap", .data = &unaligned_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "ctrl-alt-del", .data = &C_A_D, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, 
}, #ifdef CONFIG_FUNCTION_TRACER { .procname = "ftrace_enabled", .data = &ftrace_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = ftrace_enable_sysctl, }, #endif #ifdef CONFIG_STACK_TRACER { .procname = "stack_tracer_enabled", .data = &stack_tracer_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = stack_trace_sysctl, }, #endif #ifdef CONFIG_TRACING { .procname = "ftrace_dump_on_oops", .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "traceoff_on_warning", .data = &__disable_trace_on_warning, .maxlen = sizeof(__disable_trace_on_warning), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "tracepoint_printk", .data = &tracepoint_printk, .maxlen = sizeof(tracepoint_printk), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_KEXEC_CORE { .procname = "kexec_load_disabled", .data = &kexec_load_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_MODULES { .procname = "modprobe", .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "modules_disabled", .data = &modules_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_UEVENT_HELPER { .procname = "hotplug", .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #endif #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BSD_PROCESS_ACCT { .procname = "acct", .data = &acct_parm, .maxlen = 3*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MAGIC_SYSRQ { .procname = "sysrq", .data = &__sysrq_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = sysrq_sysctl_handler, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .data = NULL, .maxlen = sizeof (int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif { .procname = "threads-max", .data = NULL, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_max_threads, }, { .procname = "random", .mode = 0555, .child = random_table, }, { .procname = "usermodehelper", .mode = 0555, .child = usermodehelper_table, }, { .procname = "overflowuid", .data = &overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU { .procname = "ieee_emulation_warnings", .data = &sysctl_ieee_emulation_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "userprocess_debug", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "pid_max", .data = &pid_max, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, { .procname = "panic_on_oops", .data = &panic_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #if defined CONFIG_PRINTK { .procname = 
"printk", .data = &console_loglevel, .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_ratelimit", .data = &printk_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "printk_ratelimit_burst", .data = &printk_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_delay", .data = &printk_delay_msec, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &ten_thousand, }, { .procname = "dmesg_restrict", .data = &dmesg_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &one, }, { .procname = "kptr_restrict", .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &two, }, #endif { .procname = "ngroups_max", .data = &ngroups_max, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "cap_last_cap", .data = (void *)&cap_last_cap, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, #if defined(CONFIG_LOCKUP_DETECTOR) { .procname = "watchdog", .data = &watchdog_user_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_thresh", .data = &watchdog_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_watchdog_thresh, .extra1 = &zero, .extra2 = &sixty, }, { .procname = "nmi_watchdog", .data = &nmi_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_nmi_watchdog, .extra1 = &zero, #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) .extra2 = &one, #else .extra2 = &zero, #endif }, { .procname = "soft_watchdog", .data = &soft_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_soft_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_cpumask", .data = &watchdog_cpumask_bits, .maxlen = NR_CPUS, .mode = 0644, .proc_handler = proc_watchdog_cpumask, }, { .procname = "softlockup_panic", .data = &softlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #ifdef CONFIG_HARDLOCKUP_DETECTOR { .procname = "hardlockup_panic", .data = &hardlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_SMP { .procname = "softlockup_all_cpu_backtrace", .data = &sysctl_softlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hardlockup_all_cpu_backtrace", .data = &sysctl_hardlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_SMP */ #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_X86) { .procname = "panic_on_unrecovered_nmi", .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "panic_on_io_nmi", .data = &panic_on_io_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_DEBUG_STACKOVERFLOW { 
.procname = "panic_on_stackoverflow", .data = &sysctl_panic_on_stackoverflow, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "bootloader_type", .data = &bootloader_type, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bootloader_version", .data = &bootloader_version, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "kstack_depth_to_print", .data = &kstack_depth_to_print, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "io_delay_type", .data = &io_delay_type, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_MMU) { .procname = "randomize_va_space", .data = &randomize_va_space, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .procname = "spin_retry", .data = &spin_retry, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) { .procname = "acpi_video_flags", .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN { .procname = "ignore-unaligned-usertrap", .data = &no_unaligned_warning, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_IA64 { .procname = "unaligned-dump-stack", .data = &unaligned_dump_stack, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DETECT_HUNG_TASK { .procname = "hung_task_panic", .data = &sysctl_hung_task_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hung_task_check_count", .data = &sysctl_hung_task_check_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "hung_task_timeout_secs", .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_dohung_task_timeout_secs, .extra2 = &hung_task_timeout_max, }, { .procname = "hung_task_warnings", .data = &sysctl_hung_task_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, }, #endif #ifdef CONFIG_COMPAT { .procname = "compat-log", .data = &compat_log, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_RT_MUTEXES { .procname = "max_lock_depth", .data = &max_lock_depth, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "poweroff_cmd", .data = &poweroff_cmd, .maxlen = POWEROFF_CMD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #ifdef CONFIG_KEYS { .procname = "keys", .mode = 0555, .child = key_sysctls, }, #endif #ifdef CONFIG_PERF_EVENTS /* * User-space scripts rely on the existence of this file * as a feature check for perf_events being enabled. * * So it's an ABI, do not remove! 
*/ { .procname = "perf_event_paranoid", .data = &sysctl_perf_event_paranoid, .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_mlock_kb", .data = &sysctl_perf_event_mlock, .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_max_sample_rate", .data = &sysctl_perf_event_sample_rate, .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, .proc_handler = perf_proc_update_handler, .extra1 = &one, }, { .procname = "perf_cpu_time_max_percent", .data = &sysctl_perf_cpu_time_max_percent, .maxlen = sizeof(sysctl_perf_cpu_time_max_percent), .mode = 0644, .proc_handler = perf_cpu_time_max_percent_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_KMEMCHECK { .procname = "kmemcheck", .data = &kmemcheck_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic_on_warn", .data = &panic_on_warn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) { .procname = "timer_migration", .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = timer_migration_handler, }, #endif #ifdef CONFIG_BPF_SYSCALL { .procname = "unprivileged_bpf_disabled", .data = &sysctl_unprivileged_bpf_disabled, .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif { } }; static struct ctl_table vm_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = overcommit_ratio_handler, }, { .procname = "overcommit_kbytes", .data = &sysctl_overcommit_kbytes, .maxlen = sizeof(sysctl_overcommit_kbytes), .mode = 0644, .proc_handler = overcommit_kbytes_handler, }, { .procname = "page-cluster", .data = &page_cluster, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = &one_ul, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_bytes", .data = 
&vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = &dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirtytime_expire_seconds", .data = &dirtytime_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = dirtytime_interval_handler, .extra1 = &zero, }, { .procname = "nr_pdflush_threads", .mode = 0444 /* read-only */, .proc_handler = pdflush_proc_obsolete, }, { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_sysctl_handler, }, #ifdef CONFIG_NUMA { .procname = "nr_hugepages_mempolicy", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, }, #endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "hugepages_treat_as_movable", .data = &hugepages_treat_as_movable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_overcommit_handler, }, #endif { .procname = "lowmem_reserve_ratio", .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, { .procname = "drop_caches", .data = &sysctl_drop_caches, .maxlen = sizeof(int), .mode = 0644, .proc_handler = drop_caches_sysctl_handler, .extra1 = &one, .extra2 = &four, }, #ifdef CONFIG_COMPACTION { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_extfrag_handler, .extra1 = &min_extfrag_threshold, .extra2 = &max_extfrag_threshold, }, { .procname = "compact_unevictable_allowed", .data = &sysctl_compact_unevictable_allowed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_COMPACTION */ { .procname = "min_free_kbytes", .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, .proc_handler = percpu_pagelist_fraction_sysctl_handler, .extra1 = &zero, }, #ifdef CONFIG_MMU { .procname = "max_map_count", .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #else { .procname = "nr_trim_pages", .data = &sysctl_nr_trim_pages, .maxlen = sizeof(sysctl_nr_trim_pages), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #endif { .procname = 
"laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "block_dump", .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "vfs_cache_pressure", .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT { .procname = "legacy_va_layout", .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_NUMA { .procname = "zone_reclaim_mode", .data = &zone_reclaim_mode, .maxlen = sizeof(zone_reclaim_mode), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "min_unmapped_ratio", .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "min_slab_ratio", .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_SMP { .procname = "stat_interval", .data = &sysctl_stat_interval, .maxlen = sizeof(sysctl_stat_interval), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif #ifdef CONFIG_MMU { .procname = "mmap_min_addr", .data = &dac_mmap_min_addr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = mmap_min_addr_handler, }, #endif #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, .proc_handler = numa_zonelist_order_handler, }, #endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { .procname = "vdso_enabled", #ifdef CONFIG_X86_32 .data = &vdso32_enabled, .maxlen = sizeof(vdso32_enabled), #else .data = &vdso_enabled, .maxlen = sizeof(vdso_enabled), #endif .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "memory_failure_recovery", .data = &sysctl_memory_failure_recovery, .maxlen = sizeof(sysctl_memory_failure_recovery), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS { .procname = "mmap_rnd_bits", .data = &mmap_rnd_bits, .maxlen = sizeof(mmap_rnd_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_bits_min, .extra2 = (void *)&mmap_rnd_bits_max, }, #endif 
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS { .procname = "mmap_rnd_compat_bits", .data = &mmap_rnd_compat_bits, .maxlen = sizeof(mmap_rnd_compat_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_compat_bits_min, .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif { } }; static struct ctl_table fs_table[] = { { .procname = "inode-nr", .data = &inodes_stat, .maxlen = 2*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, { .procname = "dentry-state", .data = &dentry_stat, .maxlen = 6*sizeof(long), .mode = 0444, .proc_handler = proc_nr_dentry, }, { .procname = "overflowuid", .data = &fs_overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &fs_overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_FILE_LOCKING { .procname = "leases-enable", .data = &leases_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DNOTIFY { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MMU #ifdef CONFIG_FILE_LOCKING { .procname = "lease-break-time", .data = &lease_break_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_AIO { .procname = "aio-nr", .data = &aio_nr, .maxlen = sizeof(aio_nr), .mode = 0444, .proc_handler = proc_doulongvec_minmax, }, { .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif /* CONFIG_AIO */ #ifdef CONFIG_INOTIFY_USER { .procname = "inotify", .mode = 0555, .child = inotify_table, }, #endif #ifdef CONFIG_EPOLL { .procname = "epoll", .mode = 0555, .child = epoll_table, }, #endif #endif { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "protected_hardlinks", .data = &sysctl_protected_hardlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_coredump, .extra1 = &zero, .extra2 = &two, }, #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) { .procname = "binfmt_misc", .mode = 0555, .child = sysctl_mount_point, }, #endif { .procname = "pipe-max-size", .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, { } }; static struct ctl_table debug_table[] = { #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE { .procname = "exception-trace", .data = &show_unhandled_signals, 
.maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #if defined(CONFIG_OPTPROBES) { .procname = "kprobes-optimization", .data = &sysctl_kprobes_optimization, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_kprobes_optimization_handler, .extra1 = &zero, .extra2 = &one, }, #endif { } }; static struct ctl_table dev_table[] = { { } }; int __init sysctl_init(void) { struct ctl_table_header *hdr; hdr = register_sysctl_table(sysctl_base_table); kmemleak_not_leak(hdr); return 0; } #endif /* CONFIG_SYSCTL */ /* * /proc/sys support */ #ifdef CONFIG_PROC_SYSCTL static int _proc_do_string(char *data, int maxlen, int write, char __user *buffer, size_t *lenp, loff_t *ppos) { size_t len; char __user *p; char c; if (!data || !maxlen || !*lenp) { *lenp = 0; return 0; } if (write) { if (sysctl_writes_strict == SYSCTL_WRITES_STRICT) { /* Only continue writes not past the end of buffer. */ len = strlen(data); if (len > maxlen - 1) len = maxlen - 1; if (*ppos > len) return 0; len = *ppos; } else { /* Start writing from beginning of buffer. */ len = 0; } *ppos += *lenp; p = buffer; while ((p - buffer) < *lenp && len < maxlen - 1) { if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; data[len++] = c; } data[len] = 0; } else { len = strlen(data); if (len > maxlen) len = maxlen; if (*ppos > len) { *lenp = 0; return 0; } data += *ppos; len -= *ppos; if (len > *lenp) len = *lenp; if (len) if (copy_to_user(buffer, data, len)) return -EFAULT; if (len < *lenp) { if (put_user('\n', buffer + len)) return -EFAULT; len++; } *lenp = len; *ppos += len; } return 0; } static void warn_sysctl_write(struct ctl_table *table) { pr_warn_once("%s wrote to %s when file position was not 0!\n" "This will not be supported in the future. To silence this\n" "warning, set kernel.sysctl_writes_strict = -1\n", current->comm, table->procname); } /** * proc_dostring - read a string sysctl * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes a string from/to the user buffer. If the kernel * buffer provided is not large enough to hold the string, the * string is truncated. The copied string is %NULL-terminated. * If the string is being read by the user process, it is copied * and a newline '\n' is added. It is truncated if the buffer is * not large enough. * * Returns 0 on success. */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && *ppos && sysctl_writes_strict == SYSCTL_WRITES_WARN) warn_sysctl_write(table); return _proc_do_string((char *)(table->data), table->maxlen, write, (char __user *)buffer, lenp, ppos); } static size_t proc_skip_spaces(char **buf) { size_t ret; char *tmp = skip_spaces(*buf); ret = tmp - *buf; *buf = tmp; return ret; } static void proc_skip_char(char **buf, size_t *size, const char v) { while (*size) { if (**buf != v) break; (*size)--; (*buf)++; } } #define TMPBUFLEN 22 /** * proc_get_long - reads an ASCII formatted integer from a user buffer * * @buf: a kernel buffer * @size: size of the kernel buffer * @val: this is where the number will be stored * @neg: set to %TRUE if number is negative * @perm_tr: a vector which contains the allowed trailers * @perm_tr_len: size of the perm_tr vector * @tr: pointer to store the trailer character * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes read. 
If @tr is non-NULL and a trailing * character exists (size is non-zero after returning from this * function), @tr is updated with the trailing character. */ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { int len; char *p, tmp[TMPBUFLEN]; if (!*size) return -EINVAL; len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; memcpy(tmp, *buf, len); tmp[len] = 0; p = tmp; if (*p == '-' && *size > 1) { *neg = true; p++; } else *neg = false; if (!isdigit(*p)) return -EINVAL; *val = simple_strtoul(p, &p, 0); len = p - tmp; /* We don't know if the next char is whitespace thus we may accept * invalid integers (e.g. 1234...a) or two integers instead of one * (e.g. 123...1). So lets not allow such large numbers. */ if (len == TMPBUFLEN - 1) return -EINVAL; if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len)) return -EINVAL; if (tr && (len < *size)) *tr = *p; *buf += len; *size -= len; return 0; } /** * proc_put_long - converts an integer to a decimal ASCII formatted string * * @buf: the user buffer * @size: the size of the user buffer * @val: the integer to be converted * @neg: sign of the number, %TRUE for negative * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes written. */ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, bool neg) { int len; char tmp[TMPBUFLEN], *p = tmp; sprintf(p, "%s%lu", neg ? "-" : "", val); len = strlen(tmp); if (len > *size) len = *size; if (copy_to_user(*buf, tmp, len)) return -EFAULT; *size -= len; *buf += len; return 0; } #undef TMPBUFLEN static int proc_put_char(void __user **buf, size_t *size, char c) { if (*size) { char __user **buffer = (char __user **)buf; if (put_user(c, *buffer)) return -EFAULT; (*size)--, (*buffer)++; *buf = *buffer; } return 0; } static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*negp) { if (*lvalp > (unsigned long) INT_MAX + 1) return -EINVAL; *valp = -*lvalp; } else { if (*lvalp > (unsigned long) INT_MAX) return -EINVAL; *valp = *lvalp; } } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { int *i, vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (int *) tbl_data; vleft = table->maxlen / sizeof(*i); left = *lenp; if (!conv) conv = do_proc_dointvec_conv; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first=0) { unsigned long lval; bool neg; if (write) { left -= proc_skip_spaces(&p); if (!left) break; err = proc_get_long(&p, &left, &lval, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (conv(&neg, &lval, i, 1, data)) { err = -EINVAL; break; } } else { if (conv(&neg, &lval, i, 0, data)) { err = -EINVAL; 
break; } if (!first) err = proc_put_char(&buffer, &left, '\t'); if (err) break; err = proc_put_long(&buffer, &left, lval, neg); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err && left) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { return __do_proc_dointvec(table->data, table, write, buffer, lenp, ppos, conv, data); } /** * proc_dointvec - read a vector of integers * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * Returns 0 on success. */ int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, NULL,NULL); } /* * Taint values can only be increased * This means we can safely use a temporary. */ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; unsigned long tmptaint = get_taint(); int err; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; t = *table; t.data = &tmptaint; err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); if (err < 0) return err; if (write) { /* * Poor man's atomic or. Not worth adding a primitive * to everyone's atomic.h for this */ int i; for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) { if ((tmptaint >> i) & 1) add_taint(i, LOCKDEP_STILL_OK); } } return err; } #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } #endif struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { struct do_proc_dointvec_minmax_conv_param *param = data; if (write) { int val = *negp ? -*lvalp : *lvalp; if ((param->min && *param->min > val) || (param->max && *param->max < val)) return -EINVAL; *valp = val; } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } /** * proc_dointvec_minmax - read a vector of integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. 
*/ int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct do_proc_dointvec_minmax_conv_param param = { .min = (int *) table->extra1, .max = (int *) table->extra2, }; return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_minmax_conv, &param); } static void validate_coredump_safety(void) { #ifdef CONFIG_COREDUMP if (suid_dumpable == SUID_DUMP_ROOT && core_pattern[0] != '/' && core_pattern[0] != '|') { printk(KERN_WARNING "Unsafe core_pattern used with "\ "suid_dumpable=2. Pipe handler or fully qualified "\ "core dump path required.\n"); } #endif } static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dostring(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #endif static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { unsigned long *i, *min, *max; int vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (unsigned long *) data; min = (unsigned long *) table->extra1; max = (unsigned long *) table->extra2; vleft = table->maxlen / sizeof(unsigned long); left = *lenp; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first = 0) { unsigned long val; if (write) { bool neg; left -= proc_skip_spaces(&p); err = proc_get_long(&p, &left, &val, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (neg) continue; if ((min && val < *min) || (max && val > *max)) continue; *i = val; } else { val = convdiv * (*i) / convmul; if (!first) { err = proc_put_char(&buffer, &left, '\t'); if (err) break; } err = proc_put_long(&buffer, &left, val, false); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { return __do_proc_doulongvec_minmax(table->data, table, write, buffer, lenp, ppos, convmul, convdiv); } /** * proc_doulongvec_minmax - read a vector of long integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. 
*/ int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); } /** * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. The values * are treated as milliseconds, and converted to jiffies when they are stored. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, HZ, 1000l); } static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*lvalp > LONG_MAX / HZ) return 1; *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = lval / HZ; } return 0; } static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (USER_HZ < HZ && *lvalp > (LONG_MAX / HZ) * USER_HZ) return 1; *valp = clock_t_to_jiffies(*negp ? -*lvalp : *lvalp); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_clock_t(lval); } return 0; } static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); if (jif > INT_MAX) return 1; *valp = (int)jif; } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_msecs(lval); } return 0; } /** * proc_dointvec_jiffies - read a vector of integers as seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in seconds, and are converted into * jiffies. * * Returns 0 on success. */ int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_jiffies_conv,NULL); } /** * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: pointer to the file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/USER_HZ seconds, and * are converted into jiffies. * * Returns 0 on success. 
*/ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_userhz_jiffies_conv,NULL); } /** * proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * @ppos: the current position in the file * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/1000 seconds, and * are converted into jiffies. * * Returns 0 on success. */ int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_ms_jiffies_conv, NULL); } static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp; int r; tmp = pid_vnr(cad_pid); r = __do_proc_dointvec(&tmp, table, write, buffer, lenp, ppos, NULL, NULL); if (r || !write) return r; new_pid = find_get_pid(tmp); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } /** * proc_do_large_bitmap - read/write from/to a large bitmap * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * The bitmap is stored at table->data and the bitmap length (in bits) * in table->maxlen. * * We use a range comma separated format (e.g. 1,3-4,10-10) so that * large bitmaps may be represented in a compact manner. Writing into * the file will clear the bitmap then update it with the given input. * * Returns 0 on success. 
*/ int proc_do_large_bitmap(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err = 0; bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { char *kbuf, *p; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long), GFP_KERNEL); if (!tmp_bitmap) { kfree(kbuf); return -ENOMEM; } proc_skip_char(&p, &left, '\n'); while (!err && left) { unsigned long val_a, val_b; bool neg; err = proc_get_long(&p, &left, &val_a, &neg, tr_a, sizeof(tr_a), &c); if (err) break; if (val_a >= bitmap_len || neg) { err = -EINVAL; break; } val_b = val_a; if (left) { p++; left--; } if (c == '-') { err = proc_get_long(&p, &left, &val_b, &neg, tr_b, sizeof(tr_b), &c); if (err) break; if (val_b >= bitmap_len || neg || val_a > val_b) { err = -EINVAL; break; } if (left) { p++; left--; } } bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1); first = 0; proc_skip_char(&p, &left, '\n'); } kfree(kbuf); } else { unsigned long bit_a, bit_b = 0; while (left) { bit_a = find_next_bit(bitmap, bitmap_len, bit_b); if (bit_a >= bitmap_len) break; bit_b = find_next_zero_bit(bitmap, bitmap_len, bit_a + 1) - 1; if (!first) { err = proc_put_char(&buffer, &left, ','); if (err) break; } err = proc_put_long(&buffer, &left, bit_a, false); if (err) break; if (bit_a != bit_b) { err = proc_put_char(&buffer, &left, '-'); if (err) break; err = proc_put_long(&buffer, &left, bit_b, false); if (err) break; } first = 0; bit_b++; } if (!err) err = proc_put_char(&buffer, &left, '\n'); } if (!err) { if (write) { if (*ppos) bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len); else bitmap_copy(bitmap, tmp_bitmap, bitmap_len); } kfree(tmp_bitmap); *lenp -= left; *ppos += *lenp; return 0; } else { kfree(tmp_bitmap); return err; } } #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ /* * No sense putting this after each symbol definition, twice, * exception granted :-) */ EXPORT_SYMBOL(proc_dointvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); 
EXPORT_SYMBOL(proc_dostring); EXPORT_SYMBOL(proc_doulongvec_minmax); EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
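/*
 * A minimal usage sketch, compiled out so it cannot affect the build:
 * how kernel code could expose an integer tunable through the handlers
 * exported above.  The identifiers example_value, example_table,
 * example_sysctl_init/_exit and the [0, 1] bounds are assumptions made
 * up purely for illustration; proc_dointvec_minmax() is the handler
 * defined above, and register_sysctl()/unregister_sysctl_table() are
 * provided by fs/proc/proc_sysctl.c.
 */
#if 0
static int example_value = 1;
static int example_min;		/* 0 */
static int example_max = 1;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,	/* writes outside [0, 1] return -EINVAL */
		.extra2		= &example_max,
	},
	{ }
};

static struct ctl_table_header *example_header;

static int example_sysctl_init(void)
{
	/* Creates /proc/sys/kernel/example_value. */
	example_header = register_sysctl("kernel", example_table);
	return example_header ? 0 : -ENOMEM;
}

static void example_sysctl_exit(void)
{
	unregister_sysctl_table(example_header);
}
#endif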
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII FFFFF FFFFF % % V V I F F % % V V I FFF FFF % % V V I F F % % V IIIII F F % % % % % % Read/Write Khoros Visualization Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" /* Forward declarations. */ static MagickBooleanType WriteVIFFImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s V I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsVIFF() returns MagickTrue if the image format type, identified by the % magick string, is VIFF. % % The format of the IsVIFF method is: % % MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) { if (length < 2) return(MagickFalse); if (memcmp(magick,"\253\001",2) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadVIFFImage() reads a Khoros Visualization image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image. % % The format of the ReadVIFFImage method is: % % Image *ReadVIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadVIFFImage returns a pointer to the image after % reading. 
A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadVIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_ntscRGB 1 #define VFF_CM_NONE 0 #define VFF_DEP_DECORDER 0x4 #define VFF_DEP_NSORDER 0x8 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MAPTYP_2_BYTE 2 #define VFF_MAPTYP_4_BYTE 4 #define VFF_MAPTYP_FLOAT 5 #define VFF_MAPTYP_DOUBLE 7 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_MS_SHARED 3 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 #define VFF_TYP_2_BYTE 2 #define VFF_TYP_4_BYTE 4 #define VFF_TYP_FLOAT 5 #define VFF_TYP_DOUBLE 9 typedef struct _ViffInfo { unsigned char identifier, file_type, release, version, machine_dependency, reserve[3]; char comment[512]; unsigned int rows, columns, subrows; int x_offset, y_offset; float x_bits_per_pixel, y_bits_per_pixel; unsigned int location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; double min_value, scale_factor, value; Image *image; int bit; MagickBooleanType status; MagickSizeType number_pixels; register ssize_t x; register Quantum *q; register ssize_t i; register unsigned char *p; size_t bytes_per_pixel, max_packets, quantum; ssize_t count, y; unsigned char *pixels; unsigned long lsb_first; ViffInfo viff_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read VIFF header (1024 bytes). */ count=ReadBlob(image,1,&viff_info.identifier); do { /* Verify VIFF identifier. */ if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab)) ThrowReaderException(CorruptImageError,"NotAVIFFImage"); /* Initialize VIFF image. 
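Together with the identifier byte consumed above, the fields read next account for the full 1024-byte header: 8 one-byte identification fields (identifier, file_type, release, version, machine_dependency and three reserved bytes), the 512-byte comment, 21 four-byte fields, and 420 reserved bytes that are skipped at the end.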
*/ (void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type); (void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release); (void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version); (void) ReadBlob(image,sizeof(viff_info.machine_dependency), &viff_info.machine_dependency); (void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve); count=ReadBlob(image,512,(unsigned char *) viff_info.comment); if (count != 512) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_info.comment[511]='\0'; if (strlen(viff_info.comment) > 4) (void) SetImageProperty(image,"comment",viff_info.comment,exception); if ((viff_info.machine_dependency == VFF_DEP_DECORDER) || (viff_info.machine_dependency == VFF_DEP_NSORDER)) image->endian=LSBEndian; else image->endian=MSBEndian; viff_info.rows=ReadBlobLong(image); viff_info.columns=ReadBlobLong(image); viff_info.subrows=ReadBlobLong(image); viff_info.x_offset=ReadBlobSignedLong(image); viff_info.y_offset=ReadBlobSignedLong(image); viff_info.x_bits_per_pixel=(float) ReadBlobLong(image); viff_info.y_bits_per_pixel=(float) ReadBlobLong(image); viff_info.location_type=ReadBlobLong(image); viff_info.location_dimension=ReadBlobLong(image); viff_info.number_of_images=ReadBlobLong(image); viff_info.number_data_bands=ReadBlobLong(image); viff_info.data_storage_type=ReadBlobLong(image); viff_info.data_encode_scheme=ReadBlobLong(image); viff_info.map_scheme=ReadBlobLong(image); viff_info.map_storage_type=ReadBlobLong(image); viff_info.map_rows=ReadBlobLong(image); viff_info.map_columns=ReadBlobLong(image); viff_info.map_subrows=ReadBlobLong(image); viff_info.map_enable=ReadBlobLong(image); viff_info.maps_per_cycle=ReadBlobLong(image); viff_info.color_space_model=ReadBlobLong(image); for (i=0; i < 420; i++) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows; if (number_pixels > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); if (number_pixels != (size_t) number_pixels) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (number_pixels == 0) ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported"); image->columns=viff_info.rows; image->rows=viff_info.columns; image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL : MAGICKCORE_QUANTUM_DEPTH; image->alpha_trait=viff_info.number_data_bands == 4 ? BlendPixelTrait : UndefinedPixelTrait; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image,exception); /* Verify that we can read this VIFF image. 
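Only raw-encoded files with 1 to 4 data bands, a supported data and map storage type, a recognized color-space model, an implicit pixel location, and exactly one image are accepted; anything else is rejected with a coder error below.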
*/ if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((viff_info.data_storage_type != VFF_TYP_BIT) && (viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.data_storage_type != VFF_TYP_2_BYTE) && (viff_info.data_storage_type != VFF_TYP_4_BYTE) && (viff_info.data_storage_type != VFF_TYP_FLOAT) && (viff_info.data_storage_type != VFF_TYP_DOUBLE)) ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported"); if (viff_info.data_encode_scheme != VFF_DES_RAW) ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) && (viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_FLOAT) && (viff_info.map_storage_type != VFF_MAPTYP_DOUBLE)) ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported"); if ((viff_info.color_space_model != VFF_CM_NONE) && (viff_info.color_space_model != VFF_CM_ntscRGB) && (viff_info.color_space_model != VFF_CM_genericRGB)) ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported"); if (viff_info.location_type != VFF_LOC_IMPLICIT) ThrowReaderException(CoderError,"LocationTypeIsNotSupported"); if (viff_info.number_of_images != 1) ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported"); if (viff_info.map_rows == 0) viff_info.map_scheme=VFF_MS_NONE; switch ((int) viff_info.map_scheme) { case VFF_MS_NONE: { if (viff_info.number_data_bands < 3) { /* Create linear color ramp. */ if (viff_info.data_storage_type == VFF_TYP_BIT) image->colors=2; else if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE) image->colors=256UL; else image->colors=image->depth <= 8 ? 256UL : 65536UL; status=AcquireImageColormap(image,image->colors,exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } break; } case VFF_MS_ONEPERBAND: case VFF_MS_SHARED: { unsigned char *viff_colormap; /* Allocate VIFF colormap. */ switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break; case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break; case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break; case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break; case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } image->colors=viff_info.map_columns; if ((MagickSizeType) (viff_info.map_rows*image->colors) > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if ((MagickSizeType) viff_info.map_rows > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); if ((MagickSizeType) viff_info.map_rows > (viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Read VIFF raster colormap. 
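The colormap is read as one blob of bytes_per_pixel*colors*map_rows bytes and is laid out plane by plane: the first image->colors entries supply red (or gray) values, the next image->colors supply green, and the next supply blue when map_rows is 3. Multi-byte entries are byte-swapped right after the read when a little-endian host reads a big-endian file.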
*/ count=ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows, viff_colormap); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: { MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } case VFF_MAPTYP_4_BYTE: case VFF_MAPTYP_FLOAT: { MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } default: break; } for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++) { switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break; case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break; case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break; case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break; default: value=1.0*viff_colormap[i]; break; } if (i < (ssize_t) image->colors) { image->colormap[i].red=(MagickRealType) ScaleCharToQuantum((unsigned char) value); image->colormap[i].green=(MagickRealType) ScaleCharToQuantum((unsigned char) value); image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum((unsigned char) value); } else if (i < (ssize_t) (2*image->colors)) image->colormap[i % image->colors].green=(MagickRealType) ScaleCharToQuantum((unsigned char) value); else if (i < (ssize_t) (3*image->colors)) image->colormap[i % image->colors].blue=(MagickRealType) ScaleCharToQuantum((unsigned char) value); } viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); break; } default: ThrowReaderException(CoderError,"ColormapTypeNotSupported"); } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Create bi-level colormap. */ image->colors=2; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); image->colorspace=GRAYColorspace; } /* Allocate VIFF pixels. 
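A bitmap image packs eight pixels per byte, so it needs ((columns+7)/8)*rows packets; every other storage type needs columns*rows*number_data_bands samples of bytes_per_pixel bytes each.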
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: bytes_per_pixel=2; break; case VFF_TYP_4_BYTE: bytes_per_pixel=4; break; case VFF_TYP_FLOAT: bytes_per_pixel=4; break; case VFF_TYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } if (viff_info.data_storage_type == VFF_TYP_BIT) { if (HeapOverflowSanityCheck((image->columns+7UL) >> 3UL,image->rows) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=((image->columns+7UL) >> 3UL)*image->rows; } else { if (HeapOverflowSanityCheck((size_t) number_pixels,viff_info.number_data_bands) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=(size_t) (number_pixels*viff_info.number_data_bands); } if ((MagickSizeType) (bytes_per_pixel*max_packets) > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); pixels=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax( number_pixels,max_packets),bytes_per_pixel*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(pixels,0,MagickMax(number_pixels,max_packets)* bytes_per_pixel*sizeof(*pixels)); count=ReadBlob(image,bytes_per_pixel*max_packets,pixels); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: { MSBOrderShort(pixels,bytes_per_pixel*max_packets); break; } case VFF_TYP_4_BYTE: case VFF_TYP_FLOAT: { MSBOrderLong(pixels,bytes_per_pixel*max_packets); break; } default: break; } min_value=0.0; scale_factor=1.0; if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.map_scheme == VFF_MS_NONE)) { double max_value; /* Determine scale factor. */ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break; default: value=1.0*pixels[0]; break; } max_value=value; min_value=value; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (value > max_value) max_value=value; else if (value < min_value) min_value=value; } if ((min_value == 0) && (max_value == 0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); } /* Convert pixels to Quantum size. 
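When no colormap is in use (map_scheme == VFF_MS_NONE), each sample is rescaled as (value-min_value)*scale_factor and clamped to [0, QuantumRange] before being stored back into the byte buffer.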
*/ p=(unsigned char *) pixels; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (viff_info.map_scheme == VFF_MS_NONE) { value=(value-min_value)*scale_factor; if (value > QuantumRange) value=QuantumRange; else if (value < 0) value=0; } *p=(unsigned char) ((Quantum) value); p++; } /* Convert VIFF raster image to pixel packets. */ p=(unsigned char *) pixels; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Convert bitmap scanline. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) (image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (int) (image->columns % 8); bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->storage_class == PseudoClass) for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else { /* Convert DirectColor scanline. */ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p),q); SetPixelGreen(image,ScaleCharToQuantum(*(p+number_pixels)),q); SetPixelBlue(image,ScaleCharToQuantum(*(p+2*number_pixels)),q); if (image->colors != 0) { ssize_t index; index=(ssize_t) GetPixelRed(image,q); SetPixelRed(image,ClampToQuantum(image->colormap[ ConstrainColormapIndex(image,index,exception)].red),q); index=(ssize_t) GetPixelGreen(image,q); SetPixelGreen(image,ClampToQuantum(image->colormap[ ConstrainColormapIndex(image,index,exception)].green),q); index=(ssize_t) GetPixelBlue(image,q); SetPixelBlue(image,ClampToQuantum(image->colormap[ ConstrainColormapIndex(image,index,exception)].blue),q); } SetPixelAlpha(image,image->alpha_trait != UndefinedPixelTrait ? 
ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueAlpha,q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (image->storage_class == PseudoClass) (void) SyncImage(image,exception); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; count=ReadBlob(image,1,&viff_info.identifier); if ((count == 1) && (viff_info.identifier == 0xab)) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (viff_info.identifier == 0xab)); (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterVIFFImage() adds properties for the VIFF image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterVIFFImage method is: % % size_t RegisterVIFFImage(void) % */ ModuleExport size_t RegisterVIFFImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("VIFF","VIFF","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; entry->magick=(IsImageFormatHandler *) IsVIFF; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("VIFF","XV","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterVIFFImage() removes format registrations made by the % VIFF module from the list of supported formats. % % The format of the UnregisterVIFFImage method is: % % UnregisterVIFFImage(void) % */ ModuleExport void UnregisterVIFFImage(void) { (void) UnregisterMagickInfo("VIFF"); (void) UnregisterMagickInfo("XV"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteVIFFImage() writes an image to a file in the VIFF image format. 
% % The format of the WriteVIFFImage method is: % % MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_NONE 0 #define VFF_DEP_IEEEORDER 0x2 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 typedef struct _ViffInfo { char identifier, file_type, release, version, machine_dependency, reserve[3], comment[512]; size_t rows, columns, subrows; int x_offset, y_offset; unsigned int x_bits_per_pixel, y_bits_per_pixel, location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; const char *value; MagickBooleanType status; MagickOffsetType scene; MagickSizeType number_pixels, packets; MemoryInfo *pixel_info; register const Quantum *p; register ssize_t x; register ssize_t i; register unsigned char *q; size_t imageListLength; ssize_t y; unsigned char *pixels; ViffInfo viff_info; /* Open output image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) memset(&viff_info,0,sizeof(ViffInfo)); scene=0; imageListLength=GetImageListLength(image); do { /* Initialize VIFF image structure. */ (void) TransformImageColorspace(image,sRGBColorspace,exception); DisableMSCWarning(4310) viff_info.identifier=(char) 0xab; RestoreMSCWarning viff_info.file_type=1; viff_info.release=1; viff_info.version=3; viff_info.machine_dependency=VFF_DEP_IEEEORDER; /* IEEE byte ordering */ *viff_info.comment='\0'; value=GetImageProperty(image,"comment",exception); if (value != (const char *) NULL) (void) CopyMagickString(viff_info.comment,value,MagickMin(strlen(value), 511)+1); viff_info.rows=image->columns; viff_info.columns=image->rows; viff_info.subrows=0; viff_info.x_offset=(~0); viff_info.y_offset=(~0); viff_info.x_bits_per_pixel=0; viff_info.y_bits_per_pixel=0; viff_info.location_type=VFF_LOC_IMPLICIT; viff_info.location_dimension=0; viff_info.number_of_images=1; viff_info.data_encode_scheme=VFF_DES_RAW; viff_info.map_scheme=VFF_MS_NONE; viff_info.map_storage_type=VFF_MAPTYP_NONE; viff_info.map_rows=0; viff_info.map_columns=0; viff_info.map_subrows=0; viff_info.map_enable=1; /* no colormap */ viff_info.maps_per_cycle=0; number_pixels=(MagickSizeType) image->columns*image->rows; if (image->storage_class == DirectClass) { /* Full color VIFF raster. */ viff_info.number_data_bands=image->alpha_trait ? 
4U : 3U; viff_info.color_space_model=VFF_CM_genericRGB; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=viff_info.number_data_bands*number_pixels; } else { viff_info.number_data_bands=1; viff_info.color_space_model=VFF_CM_NONE; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=number_pixels; if (SetImageGray(image,exception) == MagickFalse) { /* Colormapped VIFF raster. */ viff_info.map_scheme=VFF_MS_ONEPERBAND; viff_info.map_storage_type=VFF_MAPTYP_1_BYTE; viff_info.map_rows=3; viff_info.map_columns=(unsigned int) image->colors; } else if (image->colors <= 2) { /* Monochrome VIFF raster. */ viff_info.data_storage_type=VFF_TYP_BIT; packets=((image->columns+7) >> 3)*image->rows; } } /* Write VIFF image header (pad to 1024 bytes). */ (void) WriteBlob(image,sizeof(viff_info.identifier),(unsigned char *) &viff_info.identifier); (void) WriteBlob(image,sizeof(viff_info.file_type),(unsigned char *) &viff_info.file_type); (void) WriteBlob(image,sizeof(viff_info.release),(unsigned char *) &viff_info.release); (void) WriteBlob(image,sizeof(viff_info.version),(unsigned char *) &viff_info.version); (void) WriteBlob(image,sizeof(viff_info.machine_dependency), (unsigned char *) &viff_info.machine_dependency); (void) WriteBlob(image,sizeof(viff_info.reserve),(unsigned char *) viff_info.reserve); (void) WriteBlob(image,512,(unsigned char *) viff_info.comment); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.columns); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_offset); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_offset); viff_info.x_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_bits_per_pixel); viff_info.y_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_bits_per_pixel); (void) WriteBlobMSBLong(image,viff_info.location_type); (void) WriteBlobMSBLong(image,viff_info.location_dimension); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_of_images); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_data_bands); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_encode_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_columns); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_enable); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.maps_per_cycle); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.color_space_model); for (i=0; i < 420; i++) (void) WriteBlobByte(image,'\0'); /* Convert MIFF to VIFF raster pixels. */ pixel_info=AcquireVirtualMemory((size_t) packets,sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); q=pixels; if (image->storage_class == DirectClass) { /* Convert DirectClass packet to VIFF RGB pixel. 
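VIFF stores each band as a separate plane, so red samples are written at q, green at q+number_pixels, blue at q+2*number_pixels, and alpha (when present) at q+3*number_pixels.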
*/ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q=ScaleQuantumToChar(GetPixelRed(image,p)); *(q+number_pixels)=ScaleQuantumToChar(GetPixelGreen(image,p)); *(q+number_pixels*2)=ScaleQuantumToChar(GetPixelBlue(image,p)); if (image->alpha_trait != UndefinedPixelTrait) *(q+number_pixels*3)=ScaleQuantumToChar((Quantum) (GetPixelAlpha(image,p))); p+=GetPixelChannels(image); q++; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (SetImageGray(image,exception) == MagickFalse) { unsigned char *viff_colormap; /* Dump colormap to file. */ viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, 3*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); q=viff_colormap; for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red)); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green)); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue)); (void) WriteBlob(image,3*image->colors,viff_colormap); viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); /* Convert PseudoClass packet to VIFF colormapped pixels. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(unsigned char) GetPixelIndex(image,p); p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->colors <= 2) { ssize_t x, y; register unsigned char bit, byte; /* Convert PseudoClass image to a VIFF monochrome image. */ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { byte>>=1; if (GetPixelLuma(image,p) < (QuantumRange/2.0)) byte|=0x80; bit++; if (bit == 8) { *q++=byte; bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) *q++=byte >> (8-bit); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else { /* Convert PseudoClass packet to VIFF grayscale pixel. 
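Each pixel is reduced to a single byte computed from its luma.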
*/ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(unsigned char) ClampToQuantum(GetPixelLuma(image,p)); p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } (void) WriteBlob(image,(size_t) packets,pixels); pixel_info=RelinquishVirtualMemory(pixel_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
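/*
  Illustration only -- not part of the VIFF coder above.  A minimal,
  self-contained sketch of the LSB-first bit packing that WriteVIFFImage()
  uses for VFF_TYP_BIT data, together with the test ReadVIFFImage() applies
  when expanding a bitmap scanline (a set bit maps to colormap index 0).
  The helper names below are hypothetical.
*/
#include <stddef.h>

static void PackBitsLSBFirst(const unsigned char *dark,size_t count,
  unsigned char *packed)
{
  size_t
    i,
    n = 0;

  unsigned char
    bit = 0,
    byte = 0;

  for (i=0; i < count; i++)
  {
    byte>>=1;
    if (dark[i] != 0)
      byte|=0x80;                  /* pixel i ends up in bit (i % 8) */
    bit++;
    if (bit == 8)
      {
        packed[n++]=byte;
        bit=0;
        byte=0;
      }
  }
  if (bit != 0)
    packed[n++]=byte >> (8-bit);   /* flush a partial trailing byte */
}

static int BitIsDark(const unsigned char *packed,size_t i)
{
  /* mirrors the reader: (*p) & (0x01 << bit) selects pixel i of the row */
  return((packed[i >> 3] & (0x01 << (i & 7))) != 0);
}

/*
  Packing a row of dark flags and reading them back with BitIsDark() is a
  lossless round trip, which is how a monochrome row written by the encoder
  is re-expanded by the decoder.
*/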
./CrossVul/dataset_final_sorted/CWE-399/c/bad_905_1
crossvul-cpp_data_bad_3640_2
/* * Generic hugetlb support. * (C) William Irwin, April 2004 */ #include <linux/list.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/sysctl.h> #include <linux/highmem.h> #include <linux/mmu_notifier.h> #include <linux/nodemask.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/cpuset.h> #include <linux/mutex.h> #include <linux/bootmem.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <asm/page.h> #include <asm/pgtable.h> #include <linux/io.h> #include <linux/hugetlb.h> #include <linux/node.h> #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; static gfp_t htlb_alloc_mask = GFP_HIGHUSER; unsigned long hugepages_treat_as_movable; static int max_hstate; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; __initdata LIST_HEAD(huge_boot_pages); /* for command line parsing */ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static unsigned long __initdata default_hstate_size; #define for_each_hstate(h) \ for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++) /* * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages */ static DEFINE_SPINLOCK(hugetlb_lock); /* * Region tracking -- allows tracking of reservations and instantiated pages * across the pages in a mapping. * * The region data structures are protected by a combination of the mmap_sem * and the hugetlb_instantion_mutex. To access or modify a region the caller * must either hold the mmap_sem for write, or the mmap_sem for read and * the hugetlb_instantiation mutex: * * down_write(&mm->mmap_sem); * or * down_read(&mm->mmap_sem); * mutex_lock(&hugetlb_instantiation_mutex); */ struct file_region { struct list_head link; long from; long to; }; static long region_add(struct list_head *head, long f, long t) { struct file_region *rg, *nrg, *trg; /* Locate the region we are either in or before. */ list_for_each_entry(rg, head, link) if (f <= rg->to) break; /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; /* Check for and consume any regions we now overlap with. */ nrg = rg; list_for_each_entry_safe(rg, trg, rg->link.prev, link) { if (&rg->link == head) break; if (rg->from > t) break; /* If this area reaches higher then extend our area to * include it completely. If this is not the first area * which we intend to reuse, free it. */ if (rg->to > t) t = rg->to; if (rg != nrg) { list_del(&rg->link); kfree(rg); } } nrg->from = f; nrg->to = t; return 0; } static long region_chg(struct list_head *head, long f, long t) { struct file_region *rg, *nrg; long chg = 0; /* Locate the region we are before or in. */ list_for_each_entry(rg, head, link) if (f <= rg->to) break; /* If we are below the current region then a new region is required. * Subtle, allocate a new region at the position but make it zero * size such that we can guarantee to record the reservation. */ if (&rg->link == head || t < rg->from) { nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) return -ENOMEM; nrg->from = f; nrg->to = f; INIT_LIST_HEAD(&nrg->link); list_add(&nrg->link, rg->link.prev); return t - f; } /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; chg = t - f; /* Check for and consume any regions we now overlap with. 
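Each existing region that overlaps [f, t) already holds a reservation, so its length is subtracted from the charge, after first extending t (and the charge) when the region reaches past t. For example, with [6, 8) already reserved, charging [5, 9) returns 2: only offsets 5 and 8 are new.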
*/ list_for_each_entry(rg, rg->link.prev, link) { if (&rg->link == head) break; if (rg->from > t) return chg; /* We overlap with this area, if it extends further than * us then we must extend ourselves. Account for its * existing reservation. */ if (rg->to > t) { chg += rg->to - t; t = rg->to; } chg -= rg->to - rg->from; } return chg; } static long region_truncate(struct list_head *head, long end) { struct file_region *rg, *trg; long chg = 0; /* Locate the region we are either in or before. */ list_for_each_entry(rg, head, link) if (end <= rg->to) break; if (&rg->link == head) return 0; /* If we are in the middle of a region then adjust it. */ if (end > rg->from) { chg = rg->to - end; rg->to = end; rg = list_entry(rg->link.next, typeof(*rg), link); } /* Drop any remaining regions. */ list_for_each_entry_safe(rg, trg, rg->link.prev, link) { if (&rg->link == head) break; chg += rg->to - rg->from; list_del(&rg->link); kfree(rg); } return chg; } static long region_count(struct list_head *head, long f, long t) { struct file_region *rg; long chg = 0; /* Locate each segment we overlap with, and count that overlap. */ list_for_each_entry(rg, head, link) { int seg_from; int seg_to; if (rg->to <= f) continue; if (rg->from >= t) break; seg_from = max(rg->from, f); seg_to = min(rg->to, t); chg += seg_to - seg_from; } return chg; } /* * Convert the address within this vma to the page offset within * the mapping, in pagecache page units; huge pages here. */ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { return ((address - vma->vm_start) >> huge_page_shift(h)) + (vma->vm_pgoff >> huge_page_order(h)); } pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address) { return vma_hugecache_offset(hstate_vma(vma), vma, address); } /* * Return the size of the pages allocated when backing a VMA. In the majority * cases this will be same size as used by the page table entries. */ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { struct hstate *hstate; if (!is_vm_hugetlb_page(vma)) return PAGE_SIZE; hstate = hstate_vma(vma); return 1UL << (hstate->order + PAGE_SHIFT); } EXPORT_SYMBOL_GPL(vma_kernel_pagesize); /* * Return the page size being used by the MMU to back a VMA. In the majority * of cases, the page size used by the kernel matches the MMU size. On * architectures where it differs, an architecture-specific version of this * function is required. */ #ifndef vma_mmu_pagesize unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return vma_kernel_pagesize(vma); } #endif /* * Flags for MAP_PRIVATE reservations. These are stored in the bottom * bits of the reservation map pointer, which are always clear due to * alignment. */ #define HPAGE_RESV_OWNER (1UL << 0) #define HPAGE_RESV_UNMAPPED (1UL << 1) #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) /* * These helpers are used to track how many pages are reserved for * faults in a MAP_PRIVATE mapping. Only the process that called mmap() * is guaranteed to have their future faults succeed. * * With the exception of reset_vma_resv_huge_pages() which is called at fork(), * the reserve counters are updated with the hugetlb_lock held. It is safe * to reset the VMA at fork() time as it is not in use yet and there is no * chance of the global counters getting corrupted as a result of the values. * * The private mapping reservation is represented in a subtly different * manner to a shared mapping. 
A shared mapping has a region map associated * with the underlying file, this region map represents the backing file * pages which have ever had a reservation assigned which this persists even * after the page is instantiated. A private mapping has a region map * associated with the original mmap which is attached to all VMAs which * reference it, this region map represents those offsets which have consumed * reservation ie. where pages have been instantiated. */ static unsigned long get_vma_private_data(struct vm_area_struct *vma) { return (unsigned long)vma->vm_private_data; } static void set_vma_private_data(struct vm_area_struct *vma, unsigned long value) { vma->vm_private_data = (void *)value; } struct resv_map { struct kref refs; struct list_head regions; }; static struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); if (!resv_map) return NULL; kref_init(&resv_map->refs); INIT_LIST_HEAD(&resv_map->regions); return resv_map; } static void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); /* Clear out any active regions before we release the map. */ region_truncate(&resv_map->regions, 0); kfree(resv_map); } static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); if (!(vma->vm_flags & VM_MAYSHARE)) return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); return NULL; } static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); } static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); return (get_vma_private_data(vma) & flag) != 0; } /* Decrement the reserved pages in the hugepage pool by one */ static void decrement_hugepage_resv_vma(struct hstate *h, struct vm_area_struct *vma) { if (vma->vm_flags & VM_NORESERVE) return; if (vma->vm_flags & VM_MAYSHARE) { /* Shared mappings always use reserves */ h->resv_huge_pages--; } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { /* * Only the process that called mmap() has reserves for * private mappings. 
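* The HPAGE_RESV_OWNER flag lives in the low bits of vm_private_data and
* is cleared at fork() by reset_vma_resv_huge_pages(), so a child that
* inherited the mapping never consumes the owner's reserve here.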
*/ h->resv_huge_pages--; } } /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { VM_BUG_ON(!is_vm_hugetlb_page(vma)); if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } /* Returns true if the VMA has associated reserve pages */ static int vma_has_reserves(struct vm_area_struct *vma) { if (vma->vm_flags & VM_MAYSHARE) return 1; if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) return 1; return 0; } static void copy_gigantic_page(struct page *dst, struct page *src) { int i; struct hstate *h = page_hstate(src); struct page *dst_base = dst; struct page *src_base = src; for (i = 0; i < pages_per_huge_page(h); ) { cond_resched(); copy_highpage(dst, src); i++; dst = mem_map_next(dst, dst_base, i); src = mem_map_next(src, src_base, i); } } void copy_huge_page(struct page *dst, struct page *src) { int i; struct hstate *h = page_hstate(src); if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { copy_gigantic_page(dst, src); return; } might_sleep(); for (i = 0; i < pages_per_huge_page(h); i++) { cond_resched(); copy_highpage(dst + i, src + i); } } static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); list_add(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; } static struct page *dequeue_huge_page_node(struct hstate *h, int nid) { struct page *page; if (list_empty(&h->hugepage_freelists[nid])) return NULL; page = list_entry(h->hugepage_freelists[nid].next, struct page, lru); list_del(&page->lru); set_page_refcounted(page); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return page; } static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve) { struct page *page; struct mempolicy *mpol; nodemask_t *nodemask; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; unsigned int cpuset_mems_cookie; retry_cpuset: cpuset_mems_cookie = get_mems_allowed(); zonelist = huge_zonelist(vma, address, htlb_alloc_mask, &mpol, &nodemask); /* * A child process with MAP_PRIVATE mappings created by their parent * have no page reserves. This check ensures that reservations are * not "stolen". 
The child may still get SIGKILLed */ if (!vma_has_reserves(vma) && h->free_huge_pages - h->resv_huge_pages == 0) goto err; /* If reserves cannot be used, ensure enough pages are in the pool */ if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) goto err; for_each_zone_zonelist_nodemask(zone, z, zonelist, MAX_NR_ZONES - 1, nodemask) { if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) { page = dequeue_huge_page_node(h, zone_to_nid(zone)); if (page) { if (!avoid_reserve) decrement_hugepage_resv_vma(h, vma); break; } } } mpol_cond_put(mpol); if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) goto retry_cpuset; return page; err: mpol_cond_put(mpol); return NULL; } static void update_and_free_page(struct hstate *h, struct page *page) { int i; VM_BUG_ON(h->order >= MAX_ORDER); h->nr_huge_pages--; h->nr_huge_pages_node[page_to_nid(page)]--; for (i = 0; i < pages_per_huge_page(h); i++) { page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 1 << PG_private | 1 << PG_writeback); } set_compound_page_dtor(page, NULL); set_page_refcounted(page); arch_release_hugepage(page); __free_pages(page, huge_page_order(h)); } struct hstate *size_to_hstate(unsigned long size) { struct hstate *h; for_each_hstate(h) { if (huge_page_size(h) == size) return h; } return NULL; } static void free_huge_page(struct page *page) { /* * Can't pass hstate in here because it is called from the * compound page destructor. */ struct hstate *h = page_hstate(page); int nid = page_to_nid(page); struct address_space *mapping; mapping = (struct address_space *) page_private(page); set_page_private(page, 0); page->mapping = NULL; BUG_ON(page_count(page)); BUG_ON(page_mapcount(page)); INIT_LIST_HEAD(&page->lru); spin_lock(&hugetlb_lock); if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) { update_and_free_page(h, page); h->surplus_huge_pages--; h->surplus_huge_pages_node[nid]--; } else { enqueue_huge_page(h, page); } spin_unlock(&hugetlb_lock); if (mapping) hugetlb_put_quota(mapping, 1); } static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) { set_compound_page_dtor(page, free_huge_page); spin_lock(&hugetlb_lock); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; spin_unlock(&hugetlb_lock); put_page(page); /* free it into the hugepage allocator */ } static void prep_compound_gigantic_page(struct page *page, unsigned long order) { int i; int nr_pages = 1 << order; struct page *p = page + 1; /* we rely on prep_new_huge_page to set the destructor */ set_compound_order(page, order); __SetPageHead(page); for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { __SetPageTail(p); set_page_count(p, 0); p->first_page = page; } } int PageHuge(struct page *page) { compound_page_dtor *dtor; if (!PageCompound(page)) return 0; page = compound_head(page); dtor = get_compound_page_dtor(page); return dtor == free_huge_page; } EXPORT_SYMBOL_GPL(PageHuge); static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; if (h->order >= MAX_ORDER) return NULL; page = alloc_pages_exact_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); if (page) { if (arch_prepare_hugepage(page)) { __free_pages(page, huge_page_order(h)); return NULL; } prep_new_huge_page(h, page, nid); } return page; } /* * common helper functions for hstate_next_node_to_{alloc|free}. 
* We may have allocated or freed a huge page based on a different * nodes_allowed previously, so h->next_node_to_{alloc|free} might * be outside of *nodes_allowed. Ensure that we use an allowed * node for alloc or free. */ static int next_node_allowed(int nid, nodemask_t *nodes_allowed) { nid = next_node(nid, *nodes_allowed); if (nid == MAX_NUMNODES) nid = first_node(*nodes_allowed); VM_BUG_ON(nid >= MAX_NUMNODES); return nid; } static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) { if (!node_isset(nid, *nodes_allowed)) nid = next_node_allowed(nid, nodes_allowed); return nid; } /* * returns the previously saved node ["this node"] from which to * allocate a persistent huge page for the pool and advance the * next node from which to allocate, handling wrap at end of node * mask. */ static int hstate_next_node_to_alloc(struct hstate *h, nodemask_t *nodes_allowed) { int nid; VM_BUG_ON(!nodes_allowed); nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); return nid; } static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) { struct page *page; int start_nid; int next_nid; int ret = 0; start_nid = hstate_next_node_to_alloc(h, nodes_allowed); next_nid = start_nid; do { page = alloc_fresh_huge_page_node(h, next_nid); if (page) { ret = 1; break; } next_nid = hstate_next_node_to_alloc(h, nodes_allowed); } while (next_nid != start_nid); if (ret) count_vm_event(HTLB_BUDDY_PGALLOC); else count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); return ret; } /* * helper for free_pool_huge_page() - return the previously saved * node ["this node"] from which to free a huge page. Advance the * next node id whether or not we find a free huge page to free so * that the next attempt to free addresses the next node. */ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) { int nid; VM_BUG_ON(!nodes_allowed); nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); return nid; } /* * Free huge page from pool from next node to free. * Attempt to keep persistent huge pages more or less * balanced over allowed nodes. * Called with hugetlb_lock locked. */ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, bool acct_surplus) { int start_nid; int next_nid; int ret = 0; start_nid = hstate_next_node_to_free(h, nodes_allowed); next_nid = start_nid; do { /* * If we're returning unused surplus pages, only examine * nodes with surplus pages. */ if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) && !list_empty(&h->hugepage_freelists[next_nid])) { struct page *page = list_entry(h->hugepage_freelists[next_nid].next, struct page, lru); list_del(&page->lru); h->free_huge_pages--; h->free_huge_pages_node[next_nid]--; if (acct_surplus) { h->surplus_huge_pages--; h->surplus_huge_pages_node[next_nid]--; } update_and_free_page(h, page); ret = 1; break; } next_nid = hstate_next_node_to_free(h, nodes_allowed); } while (next_nid != start_nid); return ret; } static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) { struct page *page; unsigned int r_nid; if (h->order >= MAX_ORDER) return NULL; /* * Assume we will successfully allocate the surplus page to * prevent racing processes from causing the surplus to exceed * overcommit * * This however introduces a different race, where a process B * tries to grow the static hugepage pool while alloc_pages() is * called by process A. 
B will only examine the per-node * counters in determining if surplus huge pages can be * converted to normal huge pages in adjust_pool_surplus(). A * won't be able to increment the per-node counter, until the * lock is dropped by B, but B doesn't drop hugetlb_lock until * no more huge pages can be converted from surplus to normal * state (and doesn't try to convert again). Thus, we have a * case where a surplus huge page exists, the pool is grown, and * the surplus huge page still exists after, even though it * should just have been converted to a normal huge page. This * does not leak memory, though, as the hugepage will be freed * once it is out of use. It also does not allow the counters to * go out of whack in adjust_pool_surplus() as we don't modify * the node values until we've gotten the hugepage and only the * per-node value is checked there. */ spin_lock(&hugetlb_lock); if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { spin_unlock(&hugetlb_lock); return NULL; } else { h->nr_huge_pages++; h->surplus_huge_pages++; } spin_unlock(&hugetlb_lock); if (nid == NUMA_NO_NODE) page = alloc_pages(htlb_alloc_mask|__GFP_COMP| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); else page = alloc_pages_exact_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); if (page && arch_prepare_hugepage(page)) { __free_pages(page, huge_page_order(h)); page = NULL; } spin_lock(&hugetlb_lock); if (page) { r_nid = page_to_nid(page); set_compound_page_dtor(page, free_huge_page); /* * We incremented the global counters already */ h->nr_huge_pages_node[r_nid]++; h->surplus_huge_pages_node[r_nid]++; __count_vm_event(HTLB_BUDDY_PGALLOC); } else { h->nr_huge_pages--; h->surplus_huge_pages--; __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); } spin_unlock(&hugetlb_lock); return page; } /* * This allocation function is useful in the context where vma is irrelevant. * E.g. soft-offlining uses this function because it only cares physical * address of error page. */ struct page *alloc_huge_page_node(struct hstate *h, int nid) { struct page *page; spin_lock(&hugetlb_lock); page = dequeue_huge_page_node(h, nid); spin_unlock(&hugetlb_lock); if (!page) page = alloc_buddy_huge_page(h, nid); return page; } /* * Increase the hugetlb pool such that it can accommodate a reservation * of size 'delta'. */ static int gather_surplus_pages(struct hstate *h, int delta) { struct list_head surplus_list; struct page *page, *tmp; int ret, i; int needed, allocated; bool alloc_ok = true; needed = (h->resv_huge_pages + delta) - h->free_huge_pages; if (needed <= 0) { h->resv_huge_pages += delta; return 0; } allocated = 0; INIT_LIST_HEAD(&surplus_list); ret = -ENOMEM; retry: spin_unlock(&hugetlb_lock); for (i = 0; i < needed; i++) { page = alloc_buddy_huge_page(h, NUMA_NO_NODE); if (!page) { alloc_ok = false; break; } list_add(&page->lru, &surplus_list); } allocated += i; /* * After retaking hugetlb_lock, we need to recalculate 'needed' * because either resv_huge_pages or free_huge_pages may have changed. */ spin_lock(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - (h->free_huge_pages + allocated); if (needed > 0) { if (alloc_ok) goto retry; /* * We were not able to allocate enough pages to * satisfy the entire reservation so we free what * we've allocated so far. */ goto free; } /* * The surplus_list now contains _at_least_ the number of extra pages * needed to accommodate the reservation. 
Add the appropriate number * of pages to the hugetlb pool and free the extras back to the buddy * allocator. Commit the entire reservation here to prevent another * process from stealing the pages as they are added to the pool but * before they are reserved. */ needed += allocated; h->resv_huge_pages += delta; ret = 0; /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) break; list_del(&page->lru); /* * This page is now managed by the hugetlb allocator and has * no users -- drop the buddy allocator's reference. */ put_page_testzero(page); VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } free: spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ if (!list_empty(&surplus_list)) { list_for_each_entry_safe(page, tmp, &surplus_list, lru) { list_del(&page->lru); put_page(page); } } spin_lock(&hugetlb_lock); return ret; } /* * When releasing a hugetlb pool reservation, any surplus pages that were * allocated to satisfy the reservation must be explicitly freed if they were * never used. * Called with hugetlb_lock held. */ static void return_unused_surplus_pages(struct hstate *h, unsigned long unused_resv_pages) { unsigned long nr_pages; /* Uncommit the reservation */ h->resv_huge_pages -= unused_resv_pages; /* Cannot return gigantic pages currently */ if (h->order >= MAX_ORDER) return; nr_pages = min(unused_resv_pages, h->surplus_huge_pages); /* * We want to release as many surplus pages as possible, spread * evenly across all nodes with memory. Iterate across these nodes * until we can no longer free unreserved surplus pages. This occurs * when the nodes with surplus pages have no free pages. * free_pool_huge_page() will balance the the freed pages across the * on-line nodes with memory and will handle the hstate accounting. */ while (nr_pages--) { if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1)) break; } } /* * Determine if the huge page at addr within the vma has an associated * reservation. Where it does not we will need to logically increase * reservation and actually increase quota before an allocation can occur. * Where any new reservation would be required the reservation change is * prepared, but not committed. Once the page has been quota'd allocated * an instantiated the change should be committed via vma_commit_reservation. * No action is required on failure. 
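*
* Three cases are handled below: a shared mapping charges the region list
* hanging off the inode's mapping, a private mapping that owns the
* reservation (HPAGE_RESV_OWNER) charges its vma resv_map, and a private
* mapping without the owner flag always needs a fresh page (returns 1).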
*/ static long vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; if (vma->vm_flags & VM_MAYSHARE) { pgoff_t idx = vma_hugecache_offset(h, vma, addr); return region_chg(&inode->i_mapping->private_list, idx, idx + 1); } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { return 1; } else { long err; pgoff_t idx = vma_hugecache_offset(h, vma, addr); struct resv_map *reservations = vma_resv_map(vma); err = region_chg(&reservations->regions, idx, idx + 1); if (err < 0) return err; return 0; } } static void vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; if (vma->vm_flags & VM_MAYSHARE) { pgoff_t idx = vma_hugecache_offset(h, vma, addr); region_add(&inode->i_mapping->private_list, idx, idx + 1); } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { pgoff_t idx = vma_hugecache_offset(h, vma, addr); struct resv_map *reservations = vma_resv_map(vma); /* Mark this page used in the map. */ region_add(&reservations->regions, idx, idx + 1); } } static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { struct hstate *h = hstate_vma(vma); struct page *page; struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; long chg; /* * Processes that did not create the mapping will have no reserves and * will not have accounted against quota. Check that the quota can be * made before satisfying the allocation * MAP_NORESERVE mappings may also need pages and quota allocated * if no reserve mapping overlaps. */ chg = vma_needs_reservation(h, vma, addr); if (chg < 0) return ERR_PTR(-VM_FAULT_OOM); if (chg) if (hugetlb_get_quota(inode->i_mapping, chg)) return ERR_PTR(-VM_FAULT_SIGBUS); spin_lock(&hugetlb_lock); page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); spin_unlock(&hugetlb_lock); if (!page) { page = alloc_buddy_huge_page(h, NUMA_NO_NODE); if (!page) { hugetlb_put_quota(inode->i_mapping, chg); return ERR_PTR(-VM_FAULT_SIGBUS); } } set_page_private(page, (unsigned long) mapping); vma_commit_reservation(h, vma, addr); return page; } int __weak alloc_bootmem_huge_page(struct hstate *h) { struct huge_bootmem_page *m; int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); while (nr_nodes) { void *addr; addr = __alloc_bootmem_node_nopanic( NODE_DATA(hstate_next_node_to_alloc(h, &node_states[N_HIGH_MEMORY])), huge_page_size(h), huge_page_size(h), 0); if (addr) { /* * Use the beginning of the huge page to store the * huge_bootmem_page struct (until gather_bootmem * puts them into the mem_map). 
*/ m = addr; goto found; } nr_nodes--; } return 0; found: BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); /* Put them into a private list first because mem_map is not up yet */ list_add(&m->list, &huge_boot_pages); m->hstate = h; return 1; } static void prep_compound_huge_page(struct page *page, int order) { if (unlikely(order > (MAX_ORDER - 1))) prep_compound_gigantic_page(page, order); else prep_compound_page(page, order); } /* Put bootmem huge pages into the standard lists after mem_map is up */ static void __init gather_bootmem_prealloc(void) { struct huge_bootmem_page *m; list_for_each_entry(m, &huge_boot_pages, list) { struct hstate *h = m->hstate; struct page *page; #ifdef CONFIG_HIGHMEM page = pfn_to_page(m->phys >> PAGE_SHIFT); free_bootmem_late((unsigned long)m, sizeof(struct huge_bootmem_page)); #else page = virt_to_page(m); #endif __ClearPageReserved(page); WARN_ON(page_count(page) != 1); prep_compound_huge_page(page, h->order); prep_new_huge_page(h, page, page_to_nid(page)); /* * If we had gigantic hugepages allocated at boot time, we need * to restore the 'stolen' pages to totalram_pages in order to * fix confusing memory reports from free(1) and another * side-effects, like CommitLimit going negative. */ if (h->order > (MAX_ORDER - 1)) totalram_pages += 1 << h->order; } } static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long i; for (i = 0; i < h->max_huge_pages; ++i) { if (h->order >= MAX_ORDER) { if (!alloc_bootmem_huge_page(h)) break; } else if (!alloc_fresh_huge_page(h, &node_states[N_HIGH_MEMORY])) break; } h->max_huge_pages = i; } static void __init hugetlb_init_hstates(void) { struct hstate *h; for_each_hstate(h) { /* oversize hugepages were init'ed in early boot */ if (h->order < MAX_ORDER) hugetlb_hstate_alloc_pages(h); } } static char * __init memfmt(char *buf, unsigned long n) { if (n >= (1UL << 30)) sprintf(buf, "%lu GB", n >> 30); else if (n >= (1UL << 20)) sprintf(buf, "%lu MB", n >> 20); else sprintf(buf, "%lu KB", n >> 10); return buf; } static void __init report_hugepages(void) { struct hstate *h; for_each_hstate(h) { char buf[32]; printk(KERN_INFO "HugeTLB registered %s page size, " "pre-allocated %ld pages\n", memfmt(buf, huge_page_size(h)), h->free_huge_pages); } } #ifdef CONFIG_HIGHMEM static void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { int i; if (h->order >= MAX_ORDER) return; for_each_node_mask(i, *nodes_allowed) { struct page *page, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(page, next, freel, lru) { if (count >= h->nr_huge_pages) return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(h, page); h->free_huge_pages--; h->free_huge_pages_node[page_to_nid(page)]--; } } } #else static inline void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { } #endif /* * Increment or decrement surplus_huge_pages. Keep node-specific counters * balanced by operating on them in a round-robin fashion. * Returns 1 if an adjustment was made. 
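 * Returns 0 when no node in nodes_allowed can accommodate the adjustment, * in which case the caller stops converting pages (see set_max_huge_pages() below).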
*/ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, int delta) { int start_nid, next_nid; int ret = 0; VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) start_nid = hstate_next_node_to_alloc(h, nodes_allowed); else start_nid = hstate_next_node_to_free(h, nodes_allowed); next_nid = start_nid; do { int nid = next_nid; if (delta < 0) { /* * To shrink on this node, there must be a surplus page */ if (!h->surplus_huge_pages_node[nid]) { next_nid = hstate_next_node_to_alloc(h, nodes_allowed); continue; } } if (delta > 0) { /* * Surplus cannot exceed the total number of pages */ if (h->surplus_huge_pages_node[nid] >= h->nr_huge_pages_node[nid]) { next_nid = hstate_next_node_to_free(h, nodes_allowed); continue; } } h->surplus_huge_pages += delta; h->surplus_huge_pages_node[nid] += delta; ret = 1; break; } while (next_nid != start_nid); return ret; } #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { unsigned long min_count, ret; if (h->order >= MAX_ORDER) return h->max_huge_pages; /* * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * * We might race with alloc_buddy_huge_page() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ spin_lock(&hugetlb_lock); while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ spin_unlock(&hugetlb_lock); ret = alloc_fresh_huge_page(h, nodes_allowed); spin_lock(&hugetlb_lock); if (!ret) goto out; /* Bail for signals. Probably ctrl-c from user */ if (signal_pending(current)) goto out; } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * alloc_buddy_huge_page() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. 
*/ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); while (min_count < persistent_huge_pages(h)) { if (!free_pool_huge_page(h, nodes_allowed, 0)) break; } while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: ret = persistent_huge_pages(h); spin_unlock(&hugetlb_lock); return ret; } #define HSTATE_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define HSTATE_ATTR(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) static struct kobject *hugepages_kobj; static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) { int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (hstate_kobjs[i] == kobj) { if (nidp) *nidp = NUMA_NO_NODE; return &hstates[i]; } return kobj_to_node_hstate(kobj, nidp); } static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long nr_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) nr_huge_pages = h->nr_huge_pages; else nr_huge_pages = h->nr_huge_pages_node[nid]; return sprintf(buf, "%lu\n", nr_huge_pages); } static ssize_t nr_hugepages_store_common(bool obey_mempolicy, struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { int err; int nid; unsigned long count; struct hstate *h; NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); err = strict_strtoul(buf, 10, &count); if (err) goto out; h = kobj_to_hstate(kobj, &nid); if (h->order >= MAX_ORDER) { err = -EINVAL; goto out; } if (nid == NUMA_NO_NODE) { /* * global hstate attribute */ if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) { NODEMASK_FREE(nodes_allowed); nodes_allowed = &node_states[N_HIGH_MEMORY]; } } else if (nodes_allowed) { /* * per node hstate attribute: adjust count to global, * but restrict alloc/free to the specified node. */ count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; init_nodemask_of_node(nodes_allowed, nid); } else nodes_allowed = &node_states[N_HIGH_MEMORY]; h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); if (nodes_allowed != &node_states[N_HIGH_MEMORY]) NODEMASK_FREE(nodes_allowed); return len; out: NODEMASK_FREE(nodes_allowed); return err; } static ssize_t nr_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return nr_hugepages_show_common(kobj, attr, buf); } static ssize_t nr_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { return nr_hugepages_store_common(false, kobj, attr, buf, len); } HSTATE_ATTR(nr_hugepages); #ifdef CONFIG_NUMA /* * hstate attribute for optionally mempolicy-based constraint on persistent * huge page alloc/free. 
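 * Writes through this attribute restrict the allocation/free to the nodes * allowed by the writing task's mempolicy (nr_hugepages_store_common() is * called with obey_mempolicy set below).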
*/ static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return nr_hugepages_show_common(kobj, attr, buf); } static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { return nr_hugepages_store_common(true, kobj, attr, buf, len); } HSTATE_ATTR(nr_hugepages_mempolicy); #endif static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); } static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long input; struct hstate *h = kobj_to_hstate(kobj, NULL); if (h->order >= MAX_ORDER) return -EINVAL; err = strict_strtoul(buf, 10, &input); if (err) return err; spin_lock(&hugetlb_lock); h->nr_overcommit_huge_pages = input; spin_unlock(&hugetlb_lock); return count; } HSTATE_ATTR(nr_overcommit_hugepages); static ssize_t free_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long free_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) free_huge_pages = h->free_huge_pages; else free_huge_pages = h->free_huge_pages_node[nid]; return sprintf(buf, "%lu\n", free_huge_pages); } HSTATE_ATTR_RO(free_hugepages); static ssize_t resv_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj, NULL); return sprintf(buf, "%lu\n", h->resv_huge_pages); } HSTATE_ATTR_RO(resv_hugepages); static ssize_t surplus_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long surplus_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) surplus_huge_pages = h->surplus_huge_pages; else surplus_huge_pages = h->surplus_huge_pages_node[nid]; return sprintf(buf, "%lu\n", surplus_huge_pages); } HSTATE_ATTR_RO(surplus_hugepages); static struct attribute *hstate_attrs[] = { &nr_hugepages_attr.attr, &nr_overcommit_hugepages_attr.attr, &free_hugepages_attr.attr, &resv_hugepages_attr.attr, &surplus_hugepages_attr.attr, #ifdef CONFIG_NUMA &nr_hugepages_mempolicy_attr.attr, #endif NULL, }; static struct attribute_group hstate_attr_group = { .attrs = hstate_attrs, }; static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, struct kobject **hstate_kobjs, struct attribute_group *hstate_attr_group) { int retval; int hi = h - hstates; hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); if (!hstate_kobjs[hi]) return -ENOMEM; retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); if (retval) kobject_put(hstate_kobjs[hi]); return retval; } static void __init hugetlb_sysfs_init(void) { struct hstate *h; int err; hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); if (!hugepages_kobj) return; for_each_hstate(h) { err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, hstate_kobjs, &hstate_attr_group); if (err) printk(KERN_ERR "Hugetlb: Unable to add hstate %s", h->name); } } #ifdef CONFIG_NUMA /* * node_hstate/s - associate per node hstate attributes, via their kobjects, * with node devices in node_devices[] using a parallel array. The array * index of a node device or _hstate == node id. * This is here to avoid any static dependency of the node device driver, in * the base kernel, on the hugetlb module. 
*/ struct node_hstate { struct kobject *hugepages_kobj; struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; }; struct node_hstate node_hstates[MAX_NUMNODES]; /* * A subset of global hstate attributes for node devices */ static struct attribute *per_node_hstate_attrs[] = { &nr_hugepages_attr.attr, &free_hugepages_attr.attr, &surplus_hugepages_attr.attr, NULL, }; static struct attribute_group per_node_hstate_attr_group = { .attrs = per_node_hstate_attrs, }; /* * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. * Returns node id via non-NULL nidp. */ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) { int nid; for (nid = 0; nid < nr_node_ids; nid++) { struct node_hstate *nhs = &node_hstates[nid]; int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (nhs->hstate_kobjs[i] == kobj) { if (nidp) *nidp = nid; return &hstates[i]; } } BUG(); return NULL; } /* * Unregister hstate attributes from a single node device. * No-op if no hstate attributes attached. */ void hugetlb_unregister_node(struct node *node) { struct hstate *h; struct node_hstate *nhs = &node_hstates[node->dev.id]; if (!nhs->hugepages_kobj) return; /* no hstate attributes */ for_each_hstate(h) if (nhs->hstate_kobjs[h - hstates]) { kobject_put(nhs->hstate_kobjs[h - hstates]); nhs->hstate_kobjs[h - hstates] = NULL; } kobject_put(nhs->hugepages_kobj); nhs->hugepages_kobj = NULL; } /* * hugetlb module exit: unregister hstate attributes from node devices * that have them. */ static void hugetlb_unregister_all_nodes(void) { int nid; /* * disable node device registrations. */ register_hugetlbfs_with_node(NULL, NULL); /* * remove hstate attributes from any nodes that have them. */ for (nid = 0; nid < nr_node_ids; nid++) hugetlb_unregister_node(&node_devices[nid]); } /* * Register hstate attributes for a single node device. * No-op if attributes already registered. */ void hugetlb_register_node(struct node *node) { struct hstate *h; struct node_hstate *nhs = &node_hstates[node->dev.id]; int err; if (nhs->hugepages_kobj) return; /* already allocated */ nhs->hugepages_kobj = kobject_create_and_add("hugepages", &node->dev.kobj); if (!nhs->hugepages_kobj) return; for_each_hstate(h) { err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, nhs->hstate_kobjs, &per_node_hstate_attr_group); if (err) { printk(KERN_ERR "Hugetlb: Unable to add hstate %s" " for node %d\n", h->name, node->dev.id); hugetlb_unregister_node(node); break; } } } /* * hugetlb init time: register hstate attributes for all registered node * devices of nodes that have memory. All on-line nodes should have * registered their associated device by this time. */ static void hugetlb_register_all_nodes(void) { int nid; for_each_node_state(nid, N_HIGH_MEMORY) { struct node *node = &node_devices[nid]; if (node->dev.id == nid) hugetlb_register_node(node); } /* * Let the node device driver know we're here so it can * [un]register hstate attributes on node hotplug. 
*/ register_hugetlbfs_with_node(hugetlb_register_node, hugetlb_unregister_node); } #else /* !CONFIG_NUMA */ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) { BUG(); if (nidp) *nidp = -1; return NULL; } static void hugetlb_unregister_all_nodes(void) { } static void hugetlb_register_all_nodes(void) { } #endif static void __exit hugetlb_exit(void) { struct hstate *h; hugetlb_unregister_all_nodes(); for_each_hstate(h) { kobject_put(hstate_kobjs[h - hstates]); } kobject_put(hugepages_kobj); } module_exit(hugetlb_exit); static int __init hugetlb_init(void) { /* Some platform decide whether they support huge pages at boot * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when * there is no such support */ if (HPAGE_SHIFT == 0) return 0; if (!size_to_hstate(default_hstate_size)) { default_hstate_size = HPAGE_SIZE; if (!size_to_hstate(default_hstate_size)) hugetlb_add_hstate(HUGETLB_PAGE_ORDER); } default_hstate_idx = size_to_hstate(default_hstate_size) - hstates; if (default_hstate_max_huge_pages) default_hstate.max_huge_pages = default_hstate_max_huge_pages; hugetlb_init_hstates(); gather_bootmem_prealloc(); report_hugepages(); hugetlb_sysfs_init(); hugetlb_register_all_nodes(); return 0; } module_init(hugetlb_init); /* Should be called on processing a hugepagesz=... option */ void __init hugetlb_add_hstate(unsigned order) { struct hstate *h; unsigned long i; if (size_to_hstate(PAGE_SIZE << order)) { printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); return; } BUG_ON(max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order == 0); h = &hstates[max_hstate++]; h->order = order; h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); h->nr_huge_pages = 0; h->free_huge_pages = 0; for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]); h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]); snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/1024); parsed_hstate = h; } static int __init hugetlb_nrpages_setup(char *s) { unsigned long *mhp; static unsigned long *last_mhp; /* * !max_hstate means we haven't parsed a hugepagesz= parameter yet, * so this hugepages= parameter goes to the "default hstate". */ if (!max_hstate) mhp = &default_hstate_max_huge_pages; else mhp = &parsed_hstate->max_huge_pages; if (mhp == last_mhp) { printk(KERN_WARNING "hugepages= specified twice without " "interleaving hugepagesz=, ignoring\n"); return 1; } if (sscanf(s, "%lu", mhp) <= 0) *mhp = 0; /* * Global state is always initialized later in hugetlb_init. * But we need to allocate >= MAX_ORDER hstates here early to still * use the bootmem allocator. 
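 * (Pages of order >= MAX_ORDER can never be taken from the buddy allocator * at runtime, so gigantic pages are carved out of bootmem right here, while * it is still available.)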
*/ if (max_hstate && parsed_hstate->order >= MAX_ORDER) hugetlb_hstate_alloc_pages(parsed_hstate); last_mhp = mhp; return 1; } __setup("hugepages=", hugetlb_nrpages_setup); static int __init hugetlb_default_setup(char *s) { default_hstate_size = memparse(s, &s); return 1; } __setup("default_hugepagesz=", hugetlb_default_setup); static unsigned int cpuset_mems_nr(unsigned int *array) { int node; unsigned int nr = 0; for_each_node_mask(node, cpuset_current_mems_allowed) nr += array[node]; return nr; } #ifdef CONFIG_SYSCTL static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; int ret; tmp = h->max_huge_pages; if (write && h->order >= MAX_ORDER) return -EINVAL; table->data = &tmp; table->maxlen = sizeof(unsigned long); ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); if (ret) goto out; if (write) { NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) { NODEMASK_FREE(nodes_allowed); nodes_allowed = &node_states[N_HIGH_MEMORY]; } h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); if (nodes_allowed != &node_states[N_HIGH_MEMORY]) NODEMASK_FREE(nodes_allowed); } out: return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { return hugetlb_sysctl_handler_common(false, table, write, buffer, length, ppos); } #ifdef CONFIG_NUMA int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { return hugetlb_sysctl_handler_common(true, table, write, buffer, length, ppos); } #endif /* CONFIG_NUMA */ int hugetlb_treat_movable_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); if (hugepages_treat_as_movable) htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; else htlb_alloc_mask = GFP_HIGHUSER; return 0; } int hugetlb_overcommit_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; int ret; tmp = h->nr_overcommit_huge_pages; if (write && h->order >= MAX_ORDER) return -EINVAL; table->data = &tmp; table->maxlen = sizeof(unsigned long); ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); if (ret) goto out; if (write) { spin_lock(&hugetlb_lock); h->nr_overcommit_huge_pages = tmp; spin_unlock(&hugetlb_lock); } out: return ret; } #endif /* CONFIG_SYSCTL */ void hugetlb_report_meminfo(struct seq_file *m) { struct hstate *h = &default_hstate; seq_printf(m, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" "HugePages_Surp: %5lu\n" "Hugepagesize: %8lu kB\n", h->nr_huge_pages, h->free_huge_pages, h->resv_huge_pages, h->surplus_huge_pages, 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); } int hugetlb_report_node_meminfo(int nid, char *buf) { struct hstate *h = &default_hstate; return sprintf(buf, "Node %d HugePages_Total: %5u\n" "Node %d HugePages_Free: %5u\n" "Node %d HugePages_Surp: %5u\n", nid, h->nr_huge_pages_node[nid], nid, h->free_huge_pages_node[nid], nid, h->surplus_huge_pages_node[nid]); } /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
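 * Despite the name, only hugetlb-backed memory is counted: nr_huge_pages of * the default hstate, scaled to base (PAGE_SIZE) pages.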
*/ unsigned long hugetlb_total_pages(void) { struct hstate *h = &default_hstate; return h->nr_huge_pages * pages_per_huge_page(h); } static int hugetlb_acct_memory(struct hstate *h, long delta) { int ret = -ENOMEM; spin_lock(&hugetlb_lock); /* * When cpuset is configured, it breaks the strict hugetlb page * reservation as the accounting is done on a global variable. Such * reservation is completely rubbish in the presence of cpuset because * the reservation is not checked against page availability for the * current cpuset. An application can still potentially be OOM'ed by the kernel * for lack of free htlb pages in the cpuset that the task is in. * Attempting to enforce strict accounting with cpuset is almost * impossible (or too ugly) because cpuset is so fluid that * tasks or memory nodes can be dynamically moved between cpusets. * * The change of semantics for shared hugetlb mapping with cpuset is * undesirable. However, in order to preserve some of the semantics, * we fall back to check against current free page availability as * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has. */ if (delta > 0) { if (gather_surplus_pages(h, delta) < 0) goto out; if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { return_unused_surplus_pages(h, delta); goto out; } } ret = 0; if (delta < 0) return_unused_surplus_pages(h, (unsigned long) -delta); out: spin_unlock(&hugetlb_lock); return ret; } static void hugetlb_vm_op_open(struct vm_area_struct *vma) { struct resv_map *reservations = vma_resv_map(vma); /* * This new VMA should share its sibling's reservation map if present. * The VMA will only ever have a valid reservation map pointer where * it is being copied for another still existing VMA. As that VMA * has a reference to the reservation map it cannot disappear until * after this open call completes. It is therefore safe to take a * new reference here without additional locking. */ if (reservations) kref_get(&reservations->refs); } static void hugetlb_vm_op_close(struct vm_area_struct *vma) { struct hstate *h = hstate_vma(vma); struct resv_map *reservations = vma_resv_map(vma); unsigned long reserve; unsigned long start; unsigned long end; if (reservations) { start = vma_hugecache_offset(h, vma, vma->vm_start); end = vma_hugecache_offset(h, vma, vma->vm_end); reserve = (end - start) - region_count(&reservations->regions, start, end); kref_put(&reservations->refs, resv_map_release); if (reserve) { hugetlb_acct_memory(h, -reserve); hugetlb_put_quota(vma->vm_file->f_mapping, reserve); } } } /* * We cannot handle pagefaults against hugetlb pages at all. They cause * handle_mm_fault() to try to instantiate regular-sized pages in the * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get * this far.
*/ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { BUG(); return 0; } const struct vm_operations_struct hugetlb_vm_ops = { .fault = hugetlb_vm_op_fault, .open = hugetlb_vm_op_open, .close = hugetlb_vm_op_close, }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable) { pte_t entry; if (writable) { entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); } else { entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); } entry = pte_mkyoung(entry); entry = pte_mkhuge(entry); return entry; } static void set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t entry; entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) update_mmu_cache(vma, address, ptep); } int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) { pte_t *src_pte, *dst_pte, entry; struct page *ptepage; unsigned long addr; int cow; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { src_pte = huge_pte_offset(src, addr); if (!src_pte) continue; dst_pte = huge_pte_alloc(dst, addr, sz); if (!dst_pte) goto nomem; /* If the pagetables are shared don't copy or take references */ if (dst_pte == src_pte) continue; spin_lock(&dst->page_table_lock); spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); if (!huge_pte_none(huge_ptep_get(src_pte))) { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_ptep_get(src_pte); ptepage = pte_page(entry); get_page(ptepage); page_dup_rmap(ptepage); set_huge_pte_at(dst, addr, dst_pte, entry); } spin_unlock(&src->page_table_lock); spin_unlock(&dst->page_table_lock); } return 0; nomem: return -ENOMEM; } static int is_hugetlb_entry_migration(pte_t pte) { swp_entry_t swp; if (huge_pte_none(pte) || pte_present(pte)) return 0; swp = pte_to_swp_entry(pte); if (non_swap_entry(swp) && is_migration_entry(swp)) return 1; else return 0; } static int is_hugetlb_entry_hwpoisoned(pte_t pte) { swp_entry_t swp; if (huge_pte_none(pte) || pte_present(pte)) return 0; swp = pte_to_swp_entry(pte); if (non_swap_entry(swp) && is_hwpoison_entry(swp)) return 1; else return 0; } void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *ptep; pte_t pte; struct page *page; struct page *tmp; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); /* * A page gathering list, protected by per file i_mmap_mutex. The * lock is used to avoid list corruption from multiple unmapping * of the same page since we are using page->lru. */ LIST_HEAD(page_list); WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); mmu_notifier_invalidate_range_start(mm, start, end); spin_lock(&mm->page_table_lock); for (address = start; address < end; address += sz) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; if (huge_pmd_unshare(mm, &address, ptep)) continue; /* * If a reference page is supplied, it is because a specific * page is being unmapped, not a range. Ensure the page we * are about to unmap is the actual page of interest. 
*/ if (ref_page) { pte = huge_ptep_get(ptep); if (huge_pte_none(pte)) continue; page = pte_page(pte); if (page != ref_page) continue; /* * Mark the VMA as having unmapped its page so that * future faults in this VMA will fail rather than * looking like data was lost */ set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); } pte = huge_ptep_get_and_clear(mm, address, ptep); if (huge_pte_none(pte)) continue; /* * HWPoisoned hugepage is already unmapped and dropped reference */ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) continue; page = pte_page(pte); if (pte_dirty(pte)) set_page_dirty(page); list_add(&page->lru, &page_list); /* Bail out after unmapping reference page if supplied */ if (ref_page) break; } flush_tlb_range(vma, start, end); spin_unlock(&mm->page_table_lock); mmu_notifier_invalidate_range_end(mm, start, end); list_for_each_entry_safe(page, tmp, &page_list, lru) { page_remove_rmap(page); list_del(&page->lru); put_page(page); } } void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); __unmap_hugepage_range(vma, start, end, ref_page); mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); } /* * This is called when the original mapper is failing to COW a MAP_PRIVATE * mappping it owns the reserve page for. The intention is to unmap the page * from other VMAs and let the children be SIGKILLed if they are faulting the * same region. */ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) { struct hstate *h = hstate_vma(vma); struct vm_area_struct *iter_vma; struct address_space *mapping; struct prio_tree_iter iter; pgoff_t pgoff; /* * vm_pgoff is in PAGE_SIZE units, hence the different calculation * from page cache lookup which is in HPAGE_SIZE units. */ address = address & huge_page_mask(h); pgoff = vma_hugecache_offset(h, vma, address); mapping = (struct address_space *)page_private(page); /* * Take the mapping lock for the duration of the table walk. As * this mapping should be shared between all the VMAs, * __unmap_hugepage_range() is called as the lock is already held */ mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { /* Do not unmap the current VMA */ if (iter_vma == vma) continue; /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA * could insert a zeroed page instead of the data existing * from the time of fork. This would look like data corruption */ if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) __unmap_hugepage_range(iter_vma, address, address + huge_page_size(h), page); } mutex_unlock(&mapping->i_mmap_mutex); return 1; } /* * Hugetlb_cow() should be called with page lock of the original hugepage held. * Called with hugetlb_instantiation_mutex held and pte_page locked so we * cannot race with other handlers or page migration. * Keep the pte_same checks anyway to make transition from the mutex easier. 
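 * Returns 0 on success or a VM_FAULT_* code when the copy could not be made * (see the error paths below).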
*/ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page) { struct hstate *h = hstate_vma(vma); struct page *old_page, *new_page; int avoidcopy; int outside_reserve = 0; old_page = pte_page(pte); retry_avoidcopy: /* If no-one else is actually using this page, avoid the copy * and just make the page writable */ avoidcopy = (page_mapcount(old_page) == 1); if (avoidcopy) { if (PageAnon(old_page)) page_move_anon_rmap(old_page, vma, address); set_huge_ptep_writable(vma, address, ptep); return 0; } /* * If the process that created a MAP_PRIVATE mapping is about to * perform a COW due to a shared page count, attempt to satisfy * the allocation without using the existing reserves. The pagecache * page is used to determine if the reserve at this address was * consumed or not. If reserves were used, a partial faulted mapping * at the time of fork() could consume its reserves on COW instead * of the full address range. */ if (!(vma->vm_flags & VM_MAYSHARE) && is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_page != pagecache_page) outside_reserve = 1; page_cache_get(old_page); /* Drop page_table_lock as buddy allocator may be called */ spin_unlock(&mm->page_table_lock); new_page = alloc_huge_page(vma, address, outside_reserve); if (IS_ERR(new_page)) { page_cache_release(old_page); /* * If a process owning a MAP_PRIVATE mapping fails to COW, * it is due to references held by a child and an insufficient * huge page pool. To guarantee the original mappers * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. */ if (outside_reserve) { BUG_ON(huge_pte_none(pte)); if (unmap_ref_private(mm, vma, old_page, address)) { BUG_ON(page_count(old_page) != 1); BUG_ON(huge_pte_none(pte)); spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & huge_page_mask(h)); if (likely(pte_same(huge_ptep_get(ptep), pte))) goto retry_avoidcopy; /* * race occurs while re-acquiring page_table_lock, and * our job is done. */ return 0; } WARN_ON_ONCE(1); } /* Caller expects lock to be held */ spin_lock(&mm->page_table_lock); return -PTR_ERR(new_page); } /* * When the original hugepage is shared one, it does not have * anon_vma prepared. 
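 * anon_vma_prepare() therefore has to succeed here before the new private * copy can be added to the anon rmap further down.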
*/ if (unlikely(anon_vma_prepare(vma))) { page_cache_release(new_page); page_cache_release(old_page); /* Caller expects lock to be held */ spin_lock(&mm->page_table_lock); return VM_FAULT_OOM; } copy_user_huge_page(new_page, old_page, address, vma, pages_per_huge_page(h)); __SetPageUptodate(new_page); /* * Retake the page_table_lock to check for racing updates * before the page tables are altered */ spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & huge_page_mask(h)); if (likely(pte_same(huge_ptep_get(ptep), pte))) { /* Break COW */ mmu_notifier_invalidate_range_start(mm, address & huge_page_mask(h), (address & huge_page_mask(h)) + huge_page_size(h)); huge_ptep_clear_flush(vma, address, ptep); set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page); hugepage_add_new_anon_rmap(new_page, vma, address); /* Make the old page be freed below */ new_page = old_page; mmu_notifier_invalidate_range_end(mm, address & huge_page_mask(h), (address & huge_page_mask(h)) + huge_page_size(h)); } page_cache_release(new_page); page_cache_release(old_page); return 0; } /* Return the pagecache page at a given address within a VMA */ static struct page *hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping; pgoff_t idx; mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, address); return find_lock_page(mapping, idx); } /* * Return whether there is a pagecache page to back given address within VMA. * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. */ static bool hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping; pgoff_t idx; struct page *page; mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, address); page = find_get_page(mapping, idx); if (page) put_page(page); return page != NULL; } static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int flags) { struct hstate *h = hstate_vma(vma); int ret = VM_FAULT_SIGBUS; int anon_rmap = 0; pgoff_t idx; unsigned long size; struct page *page; struct address_space *mapping; pte_t new_pte; /* * Currently, we are forced to kill the process in the event the * original mapper has unmapped pages from the child due to a failed * COW. Warn that such a situation has occurred as it may not be obvious */ if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { printk(KERN_WARNING "PID %d killed due to inadequate hugepage pool\n", current->pid); return ret; } mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, address); /* * Use page lock to guard against racing truncation * before we get page_table_lock. 
*/ retry: page = find_lock_page(mapping, idx); if (!page) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto out; page = alloc_huge_page(vma, address, 0); if (IS_ERR(page)) { ret = -PTR_ERR(page); goto out; } clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); if (vma->vm_flags & VM_MAYSHARE) { int err; struct inode *inode = mapping->host; err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); if (err) { put_page(page); if (err == -EEXIST) goto retry; goto out; } spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto backout_unlocked; } anon_rmap = 1; } } else { /* * If memory error occurs between mmap() and fault, some process * don't have hwpoisoned swap entry for errored virtual address. * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(PageHWPoison(page))) { ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(h - hstates); goto backout_unlocked; } } /* * If we are going to COW a private mapping later, we examine the * pending reservations for this page now. This will ensure that * any allocations necessary to record that reservation occur outside * the spinlock. */ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } spin_lock(&mm->page_table_lock); size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto backout; ret = 0; if (!huge_pte_none(huge_ptep_get(ptep))) goto backout; if (anon_rmap) hugepage_add_new_anon_rmap(page, vma, address); else page_dup_rmap(page); new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); } spin_unlock(&mm->page_table_lock); unlock_page(page); out: return ret; backout: spin_unlock(&mm->page_table_lock); backout_unlocked: unlock_page(page); put_page(page); goto out; } int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pte_t *ptep; pte_t entry; int ret; struct page *page = NULL; struct page *pagecache_page = NULL; static DEFINE_MUTEX(hugetlb_instantiation_mutex); struct hstate *h = hstate_vma(vma); address &= huge_page_mask(h); ptep = huge_pte_offset(mm, address); if (ptep) { entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { migration_entry_wait(mm, (pmd_t *)ptep, address); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(h - hstates); } ptep = huge_pte_alloc(mm, address, huge_page_size(h)); if (!ptep) return VM_FAULT_OOM; /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ mutex_lock(&hugetlb_instantiation_mutex); entry = huge_ptep_get(ptep); if (huge_pte_none(entry)) { ret = hugetlb_no_page(mm, vma, address, ptep, flags); goto out_mutex; } ret = 0; /* * If we are going to COW the mapping later, we examine the pending * reservations for this page now. This will ensure that any * allocations necessary to record that reservation occur outside the * spinlock. 
For private mappings, we also lookup the pagecache * page now as it is used to determine if a reservation has been * consumed. */ if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) { if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto out_mutex; } if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, vma, address); } /* * hugetlb_cow() requires page locks of pte_page(entry) and * pagecache_page, so here we need take the former one * when page != pagecache_page or !pagecache_page. * Note that locking order is always pagecache_page -> page, * so no worry about deadlock. */ page = pte_page(entry); if (page != pagecache_page) lock_page(page); spin_lock(&mm->page_table_lock); /* Check for a racing update before calling hugetlb_cow */ if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) goto out_page_table_lock; if (flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) { ret = hugetlb_cow(mm, vma, address, ptep, entry, pagecache_page); goto out_page_table_lock; } entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (huge_ptep_set_access_flags(vma, address, ptep, entry, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, address, ptep); out_page_table_lock: spin_unlock(&mm->page_table_lock); if (pagecache_page) { unlock_page(pagecache_page); put_page(pagecache_page); } if (page != pagecache_page) unlock_page(page); out_mutex: mutex_unlock(&hugetlb_instantiation_mutex); return ret; } /* Can be overriden by architectures */ __attribute__((weak)) struct page * follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int write) { BUG(); return NULL; } int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, unsigned int flags) { unsigned long pfn_offset; unsigned long vaddr = *position; int remainder = *length; struct hstate *h = hstate_vma(vma); spin_lock(&mm->page_table_lock); while (vaddr < vma->vm_end && remainder) { pte_t *pte; int absent; struct page *page; /* * Some archs (sparc64, sh*) have multiple pte_ts to * each hugepage. We have to make sure we get the * first, for the page indexing below to work. */ pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); absent = !pte || huge_pte_none(huge_ptep_get(pte)); /* * When coredumping, it suits get_dump_page if we just return * an error where there's an empty slot with no huge pagecache * to back it. This way, we avoid allocating a hugepage, and * the sparse dumpfile avoids allocating disk blocks, but its * huge holes still show up with zeroes where they need to be. */ if (absent && (flags & FOLL_DUMP) && !hugetlbfs_pagecache_present(h, vma, vaddr)) { remainder = 0; break; } if (absent || ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; spin_unlock(&mm->page_table_lock); ret = hugetlb_fault(mm, vma, vaddr, (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); spin_lock(&mm->page_table_lock); if (!(ret & VM_FAULT_ERROR)) continue; remainder = 0; break; } pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; page = pte_page(huge_ptep_get(pte)); same_page: if (pages) { pages[i] = mem_map_offset(page, pfn_offset); get_page(pages[i]); } if (vmas) vmas[i] = vma; vaddr += PAGE_SIZE; ++pfn_offset; --remainder; ++i; if (vaddr < vma->vm_end && remainder && pfn_offset < pages_per_huge_page(h)) { /* * We use pfn_offset to avoid touching the pageframes * of this compound page. 
*/ goto same_page; } } spin_unlock(&mm->page_table_lock); *length = remainder; *position = vaddr; return i ? i : -EFAULT; } void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { struct mm_struct *mm = vma->vm_mm; unsigned long start = address; pte_t *ptep; pte_t pte; struct hstate *h = hstate_vma(vma); BUG_ON(address >= end); flush_cache_range(vma, address, end); mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); spin_lock(&mm->page_table_lock); for (; address < end; address += huge_page_size(h)) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; if (huge_pmd_unshare(mm, &address, ptep)) continue; if (!huge_pte_none(huge_ptep_get(ptep))) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); } } spin_unlock(&mm->page_table_lock); mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); flush_tlb_range(vma, start, end); } int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) { long ret, chg; struct hstate *h = hstate_inode(inode); /* * Only apply hugepage reservation if asked. At fault time, an * attempt will be made for VM_NORESERVE to allocate a page * and filesystem quota without using reserves */ if (vm_flags & VM_NORESERVE) return 0; /* * Shared mappings base their reservation on the number of pages that * are already allocated on behalf of the file. Private mappings need * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !vma is a shm mapping */ if (!vma || vma->vm_flags & VM_MAYSHARE) chg = region_chg(&inode->i_mapping->private_list, from, to); else { struct resv_map *resv_map = resv_map_alloc(); if (!resv_map) return -ENOMEM; chg = to - from; set_vma_resv_map(vma, resv_map); set_vma_resv_flags(vma, HPAGE_RESV_OWNER); } if (chg < 0) return chg; /* There must be enough filesystem quota for the mapping */ if (hugetlb_get_quota(inode->i_mapping, chg)) return -ENOSPC; /* * Check enough hugepages are available for the reservation. * Hand back the quota if there are not */ ret = hugetlb_acct_memory(h, chg); if (ret < 0) { hugetlb_put_quota(inode->i_mapping, chg); return ret; } /* * Account for the reservations made. Shared mappings record regions * that have reservations as they are shared by multiple VMAs. * When the last VMA disappears, the region map says how much * the reservation was and the page cache tells how much of * the reservation was consumed. Private mappings are per-VMA and * only the consumed reservations are tracked. When the VMA * disappears, the original reservation is the VMA size and the * consumed reservations are stored in the map. 
Hence, nothing * else has to be done for private mappings here */ if (!vma || vma->vm_flags & VM_MAYSHARE) region_add(&inode->i_mapping->private_list, from, to); return 0; } void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) { struct hstate *h = hstate_inode(inode); long chg = region_truncate(&inode->i_mapping->private_list, offset); spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); hugetlb_put_quota(inode->i_mapping, (chg - freed)); hugetlb_acct_memory(h, -(chg - freed)); } #ifdef CONFIG_MEMORY_FAILURE /* Should be called in hugetlb_lock */ static int is_hugepage_on_freelist(struct page *hpage) { struct page *page; struct page *tmp; struct hstate *h = page_hstate(hpage); int nid = page_to_nid(hpage); list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) if (page == hpage) return 1; return 0; } /* * This function is called from memory failure code. * Assume the caller holds page lock of the head page. */ int dequeue_hwpoisoned_huge_page(struct page *hpage) { struct hstate *h = page_hstate(hpage); int nid = page_to_nid(hpage); int ret = -EBUSY; spin_lock(&hugetlb_lock); if (is_hugepage_on_freelist(hpage)) { list_del(&hpage->lru); set_page_refcounted(hpage); h->free_huge_pages--; h->free_huge_pages_node[nid]--; ret = 0; } spin_unlock(&hugetlb_lock); return ret; } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3640_2
crossvul-cpp_data_good_5825_0
/* * Copyright 2007 Bobby Bingham * Copyright 2012 Robert Nagy <ronag89 gmail com> * Copyright 2012 Anton Khirnov <anton khirnov net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * a filter enforcing given constant framerate */ #include <float.h> #include "libavutil/common.h" #include "libavutil/fifo.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "avfilter.h" #include "internal.h" #include "video.h" typedef struct FPSContext { const AVClass *class; AVFifoBuffer *fifo; ///< store frames until we get two successive timestamps /* timestamps in input timebase */ int64_t first_pts; ///< pts of the first frame that arrived on this filter int64_t pts; ///< pts of the first frame currently in the fifo double start_time; ///< pts, in seconds, of the expected first frame AVRational framerate; ///< target framerate int rounding; ///< AVRounding method for timestamps /* statistics */ int frames_in; ///< number of frames on input int frames_out; ///< number of frames on output int dup; ///< number of frames duplicated int drop; ///< number of framed dropped } FPSContext; #define OFFSET(x) offsetof(FPSContext, x) #define V AV_OPT_FLAG_VIDEO_PARAM #define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption fps_options[] = { { "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = V|F }, { "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V }, { "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, { "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" }, { "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" }, { "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" }, { "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" }, { "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, { NULL }, }; AVFILTER_DEFINE_CLASS(fps); static av_cold int init(AVFilterContext *ctx) { FPSContext *s = ctx->priv; if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*)))) return AVERROR(ENOMEM); s->pts = AV_NOPTS_VALUE; s->first_pts = AV_NOPTS_VALUE; av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den); return 0; } static void flush_fifo(AVFifoBuffer *fifo) { while (av_fifo_size(fifo)) { AVFrame *tmp; av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL); av_frame_free(&tmp); } } static av_cold void uninit(AVFilterContext *ctx) { 
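 /* Drain any frames still queued in the fifo (counting them as dropped), free the fifo, and log the in/out/drop/dup statistics for this filter instance. */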
FPSContext *s = ctx->priv; if (s->fifo) { s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*); flush_fifo(s->fifo); av_fifo_free(s->fifo); } av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, " "%d frames duplicated.\n", s->frames_in, s->frames_out, s->drop, s->dup); } static int config_props(AVFilterLink* link) { FPSContext *s = link->src->priv; link->time_base = av_inv_q(s->framerate); link->frame_rate= s->framerate; link->w = link->src->inputs[0]->w; link->h = link->src->inputs[0]->h; return 0; } static int request_frame(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; FPSContext *s = ctx->priv; int frames_out = s->frames_out; int ret = 0; while (ret >= 0 && s->frames_out == frames_out) ret = ff_request_frame(ctx->inputs[0]); /* flush the fifo */ if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { int i; for (i = 0; av_fifo_size(s->fifo); i++) { AVFrame *buf; av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf)) < 0) return ret; s->frames_out++; } return 0; } return ret; } static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf) { int ret; if (!av_fifo_space(fifo) && (ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) { av_frame_free(&buf); return ret; } av_fifo_generic_write(fifo, &buf, sizeof(buf), NULL); return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *buf) { AVFilterContext *ctx = inlink->dst; FPSContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int64_t delta; int i, ret; s->frames_in++; /* discard frames until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (buf->pts != AV_NOPTS_VALUE) { ret = write_to_fifo(s->fifo, buf); if (ret < 0) return ret; if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) { double first_pts = s->start_time * AV_TIME_BASE; first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX); s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q, inlink->time_base); av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n", s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q, outlink->time_base)); } else { s->first_pts = s->pts = buf->pts; } } else { av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " "timestamp.\n"); av_frame_free(&buf); s->drop++; } return 0; } /* now wait for the next timestamp */ if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) { return write_to_fifo(s->fifo, buf); } /* number of output frames */ delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base, outlink->time_base, s->rounding); if (delta < 1) { /* drop the frame and everything buffered except the first */ AVFrame *tmp; int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); s->drop += drop; av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL); flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, tmp); av_frame_free(&buf); return ret; } /* can output >= 1 frames */ for (i = 0; i < delta; i++) { AVFrame *buf_out; av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { AVFrame *dup = av_frame_clone(buf_out); av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); if (dup) ret = write_to_fifo(s->fifo, dup); else ret = AVERROR(ENOMEM); if (ret < 0) { av_frame_free(&buf_out); av_frame_free(&buf); return ret; } s->dup++; } buf_out->pts = 
av_rescale_q(s->first_pts, inlink->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { av_frame_free(&buf); return ret; } s->frames_out++; } flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, buf); s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base); return ret; } static const AVFilterPad avfilter_vf_fps_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad avfilter_vf_fps_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .request_frame = request_frame, .config_props = config_props }, { NULL } }; AVFilter avfilter_vf_fps = { .name = "fps", .description = NULL_IF_CONFIG_SMALL("Force constant framerate."), .init = init, .uninit = uninit, .priv_size = sizeof(FPSContext), .priv_class = &fps_class, .inputs = avfilter_vf_fps_inputs, .outputs = avfilter_vf_fps_outputs, };
./CrossVul/dataset_final_sorted/CWE-399/c/good_5825_0
crossvul-cpp_data_bad_5366_2
/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <assert.h> #include <stddef.h> #include <apr_strings.h> #include <httpd.h> #include <http_core.h> #include <http_connection.h> #include <http_log.h> #include <nghttp2/nghttp2.h> #include "h2_private.h" #include "h2.h" #include "h2_bucket_beam.h" #include "h2_conn.h" #include "h2_config.h" #include "h2_h2.h" #include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_headers.h" #include "h2_session.h" #include "h2_stream.h" #include "h2_task.h" #include "h2_ctx.h" #include "h2_task.h" #include "h2_util.h" static int state_transition[][7] = { /* ID OP RL RR CI CO CL */ /*ID*/{ 1, 0, 0, 0, 0, 0, 0 }, /*OP*/{ 1, 1, 0, 0, 0, 0, 0 }, /*RL*/{ 0, 0, 1, 0, 0, 0, 0 }, /*RR*/{ 0, 0, 0, 1, 0, 0, 0 }, /*CI*/{ 1, 1, 0, 0, 1, 0, 0 }, /*CO*/{ 1, 1, 0, 0, 0, 1, 0 }, /*CL*/{ 1, 1, 0, 0, 1, 1, 1 }, }; static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) { if (APLOG_C_IS_LEVEL(s->session->c, lvl)) { conn_rec *c = s->session->c; char buffer[4 * 1024]; const char *line = "(null)"; apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer); ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s", c->log_id, len? 
buffer : line); } } static int set_state(h2_stream *stream, h2_stream_state_t state) { int allowed = state_transition[state][stream->state]; if (allowed) { stream->state = state; return 1; } ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, APLOGNO(03081) "h2_stream(%ld-%d): invalid state transition from %d to %d", stream->session->id, stream->id, stream->state, state); return 0; } static int close_input(h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_CLOSED_INPUT: case H2_STREAM_ST_CLOSED: return 0; /* ignore, idempotent */ case H2_STREAM_ST_CLOSED_OUTPUT: /* both closed now */ set_state(stream, H2_STREAM_ST_CLOSED); break; default: /* everything else we jump to here */ set_state(stream, H2_STREAM_ST_CLOSED_INPUT); break; } return 1; } static int input_closed(h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_OPEN: case H2_STREAM_ST_CLOSED_OUTPUT: return 0; default: return 1; } } static int close_output(h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_CLOSED_OUTPUT: case H2_STREAM_ST_CLOSED: return 0; /* ignore, idempotent */ case H2_STREAM_ST_CLOSED_INPUT: /* both closed now */ set_state(stream, H2_STREAM_ST_CLOSED); break; default: /* everything else we jump to here */ set_state(stream, H2_STREAM_ST_CLOSED_OUTPUT); break; } return 1; } static int input_open(const h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_OPEN: case H2_STREAM_ST_CLOSED_OUTPUT: return 1; default: return 0; } } static int output_open(h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_OPEN: case H2_STREAM_ST_CLOSED_INPUT: return 1; default: return 0; } } static void prep_output(h2_stream *stream) { conn_rec *c = stream->session->c; if (!stream->out_buffer) { stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc); } } static void prepend_response(h2_stream *stream, h2_headers *response) { conn_rec *c = stream->session->c; apr_bucket *b; prep_output(stream); b = h2_bucket_headers_create(c->bucket_alloc, response); APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b); } static apr_status_t stream_pool_cleanup(void *ctx) { h2_stream *stream = ctx; apr_status_t status; ap_assert(stream->can_be_cleaned); if (stream->files) { apr_file_t *file; int i; for (i = 0; i < stream->files->nelts; ++i) { file = APR_ARRAY_IDX(stream->files, i, apr_file_t*); status = apr_file_close(file); ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c, "h2_stream(%ld-%d): destroy, closed file %d", stream->session->id, stream->id, i); } stream->files = NULL; } return APR_SUCCESS; } h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session, int initiated_on) { h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream)); stream->id = id; stream->initiated_on = initiated_on; stream->created = apr_time_now(); stream->state = H2_STREAM_ST_IDLE; stream->pool = pool; stream->session = session; stream->can_be_cleaned = 1; h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0); h2_beam_create(&stream->output, pool, id, "output", H2_BEAM_OWNER_RECV, 0); set_state(stream, H2_STREAM_ST_OPEN); apr_pool_cleanup_register(pool, stream, stream_pool_cleanup, apr_pool_cleanup_null); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082) "h2_stream(%ld-%d): opened", session->id, stream->id); return stream; } void h2_stream_cleanup(h2_stream *stream) { apr_status_t status; ap_assert(stream); if (stream->out_buffer) { /* remove any left over output buckets that may still have * references into request pools */ 
apr_brigade_cleanup(stream->out_buffer); } h2_beam_abort(stream->input); status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ); if (status == APR_EAGAIN) { ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c, "h2_stream(%ld-%d): wait on input drain", stream->session->id, stream->id); status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ); ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c, "h2_stream(%ld-%d): input drain returned", stream->session->id, stream->id); } } void h2_stream_destroy(h2_stream *stream) { ap_assert(stream); ap_assert(!h2_mplx_stream_get(stream->session->mplx, stream->id)); ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c, "h2_stream(%ld-%d): destroy", stream->session->id, stream->id); stream->can_be_cleaned = 1; if (stream->pool) { apr_pool_destroy(stream->pool); } } void h2_stream_eos_destroy(h2_stream *stream) { h2_session_stream_done(stream->session, stream); /* stream possibly destroyed */ } apr_pool_t *h2_stream_detach_pool(h2_stream *stream) { apr_pool_t *pool = stream->pool; stream->pool = NULL; return pool; } void h2_stream_rst(h2_stream *stream, int error_code) { stream->rst_error = error_code; close_input(stream); close_output(stream); if (stream->out_buffer) { apr_brigade_cleanup(stream->out_buffer); } ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): reset, error=%d", stream->session->id, stream->id, error_code); } apr_status_t h2_stream_set_request_rec(h2_stream *stream, request_rec *r) { h2_request *req; apr_status_t status; ap_assert(stream->request == NULL); ap_assert(stream->rtmp == NULL); if (stream->rst_error) { return APR_ECONNRESET; } status = h2_request_rcreate(&req, stream->pool, r); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058) "h2_request(%d): set_request_rec %s host=%s://%s%s", stream->id, req->method, req->scheme, req->authority, req->path); stream->rtmp = req; return status; } apr_status_t h2_stream_set_request(h2_stream *stream, const h2_request *r) { ap_assert(stream->request == NULL); ap_assert(stream->rtmp == NULL); stream->rtmp = h2_request_clone(stream->pool, r); return APR_SUCCESS; } static apr_status_t add_trailer(h2_stream *stream, const char *name, size_t nlen, const char *value, size_t vlen) { conn_rec *c = stream->session->c; char *hname, *hvalue; if (nlen == 0 || name[0] == ':') { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c, APLOGNO(03060) "h2_request(%ld-%d): pseudo header in trailer", c->id, stream->id); return APR_EINVAL; } if (h2_req_ignore_trailer(name, nlen)) { return APR_SUCCESS; } if (!stream->trailers) { stream->trailers = apr_table_make(stream->pool, 5); } hname = apr_pstrndup(stream->pool, name, nlen); hvalue = apr_pstrndup(stream->pool, value, vlen); h2_util_camel_case_header(hname, nlen); apr_table_mergen(stream->trailers, hname, hvalue); return APR_SUCCESS; } apr_status_t h2_stream_add_header(h2_stream *stream, const char *name, size_t nlen, const char *value, size_t vlen) { ap_assert(stream); if (!stream->has_response) { if (name[0] == ':') { if ((vlen) > stream->session->s->limit_req_line) { /* pseudo header: approximation of request line size check */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): pseudo header %s too long", stream->session->id, stream->id, name); return h2_stream_set_error(stream, HTTP_REQUEST_URI_TOO_LARGE); } } else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) { /* header too long */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 
0, stream->session->c, "h2_stream(%ld-%d): header %s too long", stream->session->id, stream->id, name); return h2_stream_set_error(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); } if (name[0] != ':') { ++stream->request_headers_added; if (stream->request_headers_added > stream->session->s->limit_req_fields) { /* too many header lines */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): too many header lines", stream->session->id, stream->id); return h2_stream_set_error(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); } } } if (h2_stream_is_scheduled(stream)) { return add_trailer(stream, name, nlen, value, vlen); } else { if (!stream->rtmp) { stream->rtmp = h2_req_create(stream->id, stream->pool, NULL, NULL, NULL, NULL, NULL, 0); } if (stream->state != H2_STREAM_ST_OPEN) { return APR_ECONNRESET; } return h2_request_add_header(stream->rtmp, stream->pool, name, nlen, value, vlen); } } apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled, h2_stream_pri_cmp *cmp, void *ctx) { apr_status_t status = APR_EINVAL; ap_assert(stream); ap_assert(stream->session); ap_assert(stream->session->mplx); if (!stream->scheduled) { if (eos) { close_input(stream); } if (h2_stream_is_ready(stream)) { /* already have a resonse, probably a HTTP error code */ return h2_mplx_process(stream->session->mplx, stream, cmp, ctx); } else if (!stream->request && stream->rtmp) { /* This is the common case: a h2_request was being assembled, now * it gets finalized and checked for completness */ status = h2_request_end_headers(stream->rtmp, stream->pool, eos); if (status == APR_SUCCESS) { stream->rtmp->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS); stream->request = stream->rtmp; stream->rtmp = NULL; stream->scheduled = 1; stream->push_policy = h2_push_policy_determine(stream->request->headers, stream->pool, push_enabled); status = h2_mplx_process(stream->session->mplx, stream, cmp, ctx); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): scheduled %s %s://%s%s " "chunked=%d", stream->session->id, stream->id, stream->request->method, stream->request->scheme, stream->request->authority, stream->request->path, stream->request->chunked); return status; } } else { status = APR_ECONNRESET; } } h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c, "h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s", stream->session->id, stream->id, stream->request->method, stream->request->scheme, stream->request->authority, stream->request->path); return status; } int h2_stream_is_scheduled(const h2_stream *stream) { return stream->scheduled; } apr_status_t h2_stream_close_input(h2_stream *stream) { conn_rec *c = stream->session->c; apr_status_t status; apr_bucket_brigade *tmp; apr_bucket *b; ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): closing input", stream->session->id, stream->id); if (stream->rst_error) { return APR_ECONNRESET; } tmp = apr_brigade_create(stream->pool, c->bucket_alloc); if (stream->trailers && !apr_is_empty_table(stream->trailers)) { h2_headers *r = h2_headers_create(HTTP_OK, stream->trailers, NULL, stream->pool); b = h2_bucket_headers_create(c->bucket_alloc, r); APR_BRIGADE_INSERT_TAIL(tmp, b); stream->trailers = NULL; } b = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(tmp, b); status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ); apr_brigade_destroy(tmp); return status; } apr_status_t 
h2_stream_write_data(h2_stream *stream, const char *data, size_t len, int eos) { conn_rec *c = stream->session->c; apr_status_t status = APR_SUCCESS; apr_bucket_brigade *tmp; ap_assert(stream); if (!stream->input) { return APR_EOF; } if (input_closed(stream) || !stream->request) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d", stream->session->id, stream->id, input_closed(stream), stream->request != NULL); return APR_EINVAL; } ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_stream(%ld-%d): add %ld input bytes", stream->session->id, stream->id, (long)len); tmp = apr_brigade_create(stream->pool, c->bucket_alloc); apr_brigade_write(tmp, NULL, NULL, data, len); status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ); apr_brigade_destroy(tmp); stream->in_data_frames++; stream->in_data_octets += len; if (eos) { return h2_stream_close_input(stream); } return status; } static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount) { conn_rec *c = stream->session->c; apr_bucket *b; apr_status_t status; if (!stream->output) { return APR_EOF; } status = h2_beam_receive(stream->output, stream->out_buffer, APR_NONBLOCK_READ, amount); ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c, "h2_stream(%ld-%d): beam_received", stream->session->id, stream->id); /* The buckets we reveive are using the stream->out_buffer pool as * lifetime which is exactly what we want since this is stream->pool. * * However: when we send these buckets down the core output filters, the * filter might decide to setaside them into a pool of its own. And it * might decide, after having sent the buckets, to clear its pool. * * This is problematic for file buckets because it then closed the contained * file. Any split off buckets we sent afterwards will result in a * APR_EBADF. 
*/ for (b = APR_BRIGADE_FIRST(stream->out_buffer); b != APR_BRIGADE_SENTINEL(stream->out_buffer); b = APR_BUCKET_NEXT(b)) { if (APR_BUCKET_IS_FILE(b)) { apr_bucket_file *f = (apr_bucket_file *)b->data; apr_pool_t *fpool = apr_file_pool_get(f->fd); if (fpool != c->pool) { apr_bucket_setaside(b, c->pool); if (!stream->files) { stream->files = apr_array_make(stream->pool, 5, sizeof(apr_file_t*)); } APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd; } } } return status; } apr_status_t h2_stream_set_error(h2_stream *stream, int http_status) { h2_headers *response; if (h2_stream_is_ready(stream)) { return APR_EINVAL; } if (stream->rtmp) { stream->request = stream->rtmp; stream->rtmp = NULL; } response = h2_headers_die(http_status, stream->request, stream->pool); prepend_response(stream, response); h2_beam_close(stream->output); return APR_SUCCESS; } static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb) { if (bb) { apr_bucket *b = APR_BRIGADE_FIRST(bb); while (b != APR_BRIGADE_SENTINEL(bb)) { if (H2_BUCKET_IS_HEADERS(b)) { return b; } b = APR_BUCKET_NEXT(b); } } return NULL; } apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, int *peos, h2_headers **presponse) { conn_rec *c = stream->session->c; apr_status_t status = APR_SUCCESS; apr_off_t requested; apr_bucket *b, *e; if (presponse) { *presponse = NULL; } if (stream->rst_error) { *plen = 0; *peos = 1; return APR_ECONNRESET; } if (!output_open(stream)) { return APR_ECONNRESET; } prep_output(stream); if (*plen > 0) { requested = H2MIN(*plen, H2_DATA_CHUNK_SIZE); } else { requested = H2_DATA_CHUNK_SIZE; } *plen = requested; H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre"); h2_util_bb_avail(stream->out_buffer, plen, peos); if (!*peos && *plen < requested) { /* try to get more data */ status = fill_buffer(stream, (requested - *plen) + H2_DATA_CHUNK_SIZE); if (APR_STATUS_IS_EOF(status)) { apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos); status = APR_SUCCESS; } else if (status == APR_EAGAIN) { /* did not receive more, it's ok */ status = APR_SUCCESS; } *plen = requested; h2_util_bb_avail(stream->out_buffer, plen, peos); } H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post"); b = APR_BRIGADE_FIRST(stream->out_buffer); while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) { e = APR_BUCKET_NEXT(b); if (APR_BUCKET_IS_FLUSH(b) || (!APR_BUCKET_IS_METADATA(b) && b->length == 0)) { APR_BUCKET_REMOVE(b); apr_bucket_destroy(b); } else { break; } b = e; } b = get_first_headers_bucket(stream->out_buffer); if (b) { /* there are HEADERS to submit */ *peos = 0; *plen = 0; if (b == APR_BRIGADE_FIRST(stream->out_buffer)) { if (presponse) { *presponse = h2_bucket_headers_get(b); APR_BUCKET_REMOVE(b); apr_bucket_destroy(b); status = APR_SUCCESS; } else { /* someone needs to retrieve the response first */ h2_mplx_keep_active(stream->session->mplx, stream->id); status = APR_EAGAIN; } } else { apr_bucket *e = APR_BRIGADE_FIRST(stream->out_buffer); while (e != APR_BRIGADE_SENTINEL(stream->out_buffer)) { if (e == b) { break; } else if (e->length != (apr_size_t)-1) { *plen += e->length; } e = APR_BUCKET_NEXT(e); } } } if (!*peos && !*plen && status == APR_SUCCESS && (!presponse || !*presponse)) { status = APR_EAGAIN; } ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "h2_stream(%ld-%d): prepare, len=%ld eos=%d", c->id, stream->id, (long)*plen, *peos); return status; } static int is_not_headers(apr_bucket *b) { return !H2_BUCKET_IS_HEADERS(b); } 
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb, apr_off_t *plen, int *peos) { conn_rec *c = stream->session->c; apr_status_t status = APR_SUCCESS; if (stream->rst_error) { return APR_ECONNRESET; } status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers); if (status == APR_SUCCESS && !*peos && !*plen) { status = APR_EAGAIN; } ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, "h2_stream(%ld-%d): read_to, len=%ld eos=%d", c->id, stream->id, (long)*plen, *peos); return status; } int h2_stream_input_is_open(const h2_stream *stream) { return input_open(stream); } apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response) { apr_status_t status = APR_SUCCESS; apr_array_header_t *pushes; int i; pushes = h2_push_collect_update(stream, stream->request, response); if (pushes && !apr_is_empty_array(pushes)) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): found %d push candidates", stream->session->id, stream->id, pushes->nelts); for (i = 0; i < pushes->nelts; ++i) { h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*); h2_stream *s = h2_session_push(stream->session, stream, push); if (!s) { status = APR_ECONNRESET; break; } } } return status; } apr_table_t *h2_stream_get_trailers(h2_stream *stream) { return NULL; } const h2_priority *h2_stream_get_priority(h2_stream *stream, h2_headers *response) { if (response && stream->initiated_on) { const char *ctype = apr_table_get(response->headers, "content-type"); if (ctype) { /* FIXME: Not good enough, config needs to come from request->server */ return h2_config_get_priority(stream->session->config, ctype); } } return NULL; } const char *h2_stream_state_str(h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_IDLE: return "IDLE"; case H2_STREAM_ST_OPEN: return "OPEN"; case H2_STREAM_ST_RESV_LOCAL: return "RESERVED_LOCAL"; case H2_STREAM_ST_RESV_REMOTE: return "RESERVED_REMOTE"; case H2_STREAM_ST_CLOSED_INPUT: return "HALF_CLOSED_REMOTE"; case H2_STREAM_ST_CLOSED_OUTPUT: return "HALF_CLOSED_LOCAL"; case H2_STREAM_ST_CLOSED: return "CLOSED"; default: return "UNKNOWN"; } } int h2_stream_is_ready(h2_stream *stream) { if (stream->has_response) { return 1; } else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) { return 1; } return 0; }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_5366_2
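set_state() in the stream code above validates every state change through a 7x7 matrix indexed as [target state][current state]. A self-contained sketch of that lookup pattern follows; the enum ordering below only mirrors the order of the table in the file (ID, OP, RL, RR, CI, CO, CL) and is hypothetical, not the real h2_stream_state_t values:

/*
 * Sketch of the transition check used by set_state(): a 1 in
 * transition[target][current] means the move is allowed, otherwise the
 * old state is kept.
 */
#include <stdio.h>

enum { ST_IDLE, ST_OPEN, ST_RESV_LOCAL, ST_RESV_REMOTE,
       ST_CLOSED_INPUT, ST_CLOSED_OUTPUT, ST_CLOSED };

static const int transition[7][7] = {
    /*            ID OP RL RR CI CO CL   (current state) */
    /* ->ID */ {  1, 0, 0, 0, 0, 0, 0 },
    /* ->OP */ {  1, 1, 0, 0, 0, 0, 0 },
    /* ->RL */ {  0, 0, 1, 0, 0, 0, 0 },
    /* ->RR */ {  0, 0, 0, 1, 0, 0, 0 },
    /* ->CI */ {  1, 1, 0, 0, 1, 0, 0 },
    /* ->CO */ {  1, 1, 0, 0, 0, 1, 0 },
    /* ->CL */ {  1, 1, 0, 0, 1, 1, 1 },
};

static int try_set_state(int *state, int target)
{
    if (!transition[target][*state])
        return 0;          /* invalid transition, keep the old state */
    *state = target;
    return 1;
}

int main(void)
{
    int state = ST_IDLE;
    printf("IDLE -> OPEN:           %s\n",
           try_set_state(&state, ST_OPEN) ? "ok" : "rejected");
    printf("OPEN -> CLOSED_INPUT:   %s\n",
           try_set_state(&state, ST_CLOSED_INPUT) ? "ok" : "rejected");
    printf("CLOSED_INPUT -> OPEN:   %s\n",
           try_set_state(&state, ST_OPEN) ? "ok" : "rejected"); /* rejected */
    return 0;
}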
crossvul-cpp_data_good_1410_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP CCCC DDDD % % P P C D D % % PPPP C D D % % P C D D % % P CCCC DDDD % % % % % % Read/Write Photo CD Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" #include "MagickCore/utility.h" /* Forward declarations. */ static MagickBooleanType WritePCDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e c o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DecodeImage recovers the Huffman encoded luminance and chrominance % deltas. % % The format of the DecodeImage method is: % % MagickBooleanType DecodeImage(Image *image,unsigned char *luma, % unsigned char *chroma1,unsigned char *chroma2) % % A description of each parameter follows: % % o image: the address of a structure of type Image. % % o luma: the address of a character buffer that contains the % luminance information. % % o chroma1: the address of a character buffer that contains the % chrominance information. % % o chroma2: the address of a character buffer that contains the % chrominance information. 
% */ static MagickBooleanType DecodeImage(Image *image,unsigned char *luma, unsigned char *chroma1,unsigned char *chroma2,ExceptionInfo *exception) { #define IsSync(sum) ((sum & 0xffffff00UL) == 0xfffffe00UL) #define PCDGetBits(n) \ { \ sum=(sum << n) & 0xffffffff; \ bits-=n; \ while (bits <= 24) \ { \ if (p >= (buffer+0x800)) \ { \ count=ReadBlob(image,0x800,buffer); \ p=buffer; \ } \ sum|=((unsigned int) (*p) << (24-bits)); \ bits+=8; \ p++; \ } \ } typedef struct PCDTable { unsigned int length, sequence; MagickStatusType mask; unsigned char key; } PCDTable; PCDTable *pcd_table[3]; register ssize_t i, j; register PCDTable *r; register unsigned char *p, *q; size_t bits, length, plane, pcd_length[3], row, sum; ssize_t count, quantum; unsigned char *buffer; /* Initialize Huffman tables. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(luma != (unsigned char *) NULL); assert(chroma1 != (unsigned char *) NULL); assert(chroma2 != (unsigned char *) NULL); buffer=(unsigned char *) AcquireQuantumMemory(0x800,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); sum=0; bits=32; p=buffer+0x800; for (i=0; i < 3; i++) { pcd_table[i]=(PCDTable *) NULL; pcd_length[i]=0; } for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) { PCDGetBits(8); length=(sum & 0xff)+1; pcd_table[i]=(PCDTable *) AcquireQuantumMemory(length, sizeof(*pcd_table[i])); if (pcd_table[i] == (PCDTable *) NULL) { buffer=(unsigned char *) RelinquishMagickMemory(buffer); for (j=0; j < i; j++) pcd_table[j]=(PCDTable *) RelinquishMagickMemory(pcd_table[j]); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } r=pcd_table[i]; for (j=0; j < (ssize_t) length; j++) { PCDGetBits(8); r->length=(unsigned int) (sum & 0xff)+1; if (r->length > 16) { buffer=(unsigned char *) RelinquishMagickMemory(buffer); for (j=0; j <= i; j++) pcd_table[j]=(PCDTable *) RelinquishMagickMemory(pcd_table[j]); return(MagickFalse); } PCDGetBits(16); r->sequence=(unsigned int) (sum & 0xffff) << 16; PCDGetBits(8); r->key=(unsigned char) (sum & 0xff); r->mask=(~((1U << (32-r->length))-1)); r++; } pcd_length[i]=(size_t) length; } /* Search for Sync byte. */ for (i=0; i < 1; i++) PCDGetBits(16); for (i=0; i < 1; i++) PCDGetBits(16); while ((sum & 0x00fff000UL) != 0x00fff000UL) PCDGetBits(8); while (IsSync(sum) == 0) PCDGetBits(1); /* Recover the Huffman encoded luminance and chrominance deltas. */ count=0; length=0; plane=0; row=0; q=luma; for ( ; ; ) { if (IsSync(sum) != 0) { /* Determine plane and row number. */ PCDGetBits(16); row=((sum >> 9) & 0x1fff); if (row == image->rows) break; PCDGetBits(8); plane=sum >> 30; PCDGetBits(16); switch (plane) { case 0: { q=luma+row*image->columns; count=(ssize_t) image->columns; break; } case 2: { q=chroma1+(row >> 1)*image->columns; count=(ssize_t) (image->columns >> 1); plane--; break; } case 3: { q=chroma2+(row >> 1)*image->columns; count=(ssize_t) (image->columns >> 1); plane--; break; } default: { for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) pcd_table[i]=(PCDTable *) RelinquishMagickMemory(pcd_table[i]); buffer=(unsigned char *) RelinquishMagickMemory(buffer); ThrowBinaryException(CorruptImageError,"CorruptImage", image->filename); } } length=pcd_length[plane]; continue; } /* Decode luminance or chrominance deltas. 
*/ r=pcd_table[plane]; for (i=0; ((i < (ssize_t) length) && ((sum & r->mask) != r->sequence)); i++) r++; if ((row > image->rows) || (r == (PCDTable *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); while ((sum & 0x00fff000) != 0x00fff000) PCDGetBits(8); while (IsSync(sum) == 0) PCDGetBits(1); continue; } if (r->key < 128) quantum=(ssize_t) (*q)+r->key; else quantum=(ssize_t) (*q)+r->key-256; *q=(unsigned char) ((quantum < 0) ? 0 : (quantum > 255) ? 255 : quantum); q++; PCDGetBits(r->length); count--; } /* Relinquish resources. */ for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) pcd_table[i]=(PCDTable *) RelinquishMagickMemory(pcd_table[i]); buffer=(unsigned char *) RelinquishMagickMemory(buffer); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P C D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPCD() returns MagickTrue if the image format type, identified by the % magick string, is PCD. % % The format of the IsPCD method is: % % MagickBooleanType IsPCD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPCD(const unsigned char *magick,const size_t length) { if (length < 2052) return(MagickFalse); if (LocaleNCompare((const char *) magick+2048,"PCD_",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPCDImage() reads a Photo CD image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. Much of the PCD decoder was derived from % the program hpcdtoppm(1) by Hadmut Danisch. % % The format of the ReadPCDImage method is: % % image=ReadPCDImage(image_info) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *OverviewImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { Image *montage_image; MontageInfo *montage_info; register Image *p; /* Create the PCD Overview image. */ for (p=image; p != (Image *) NULL; p=p->next) { (void) DeleteImageProperty(p,"label"); (void) SetImageProperty(p,"label",DefaultTileLabel,exception); } montage_info=CloneMontageInfo(image_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image_info->filename, MagickPathExtent); montage_image=MontageImageList(image_info,montage_info,image,exception); montage_info=DestroyMontageInfo(montage_info); if (montage_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); image=DestroyImageList(image); return(montage_image); } static void Upsample(const size_t width,const size_t height, const size_t scaled_width,unsigned char *pixels) { register ssize_t x, y; register unsigned char *p, *q, *r; /* Create a new image that is a integral size greater than an existing one. 
*/ assert(pixels != (unsigned char *) NULL); for (y=0; y < (ssize_t) height; y++) { p=pixels+(height-1-y)*scaled_width+(width-1); q=pixels+((height-1-y) << 1)*scaled_width+((width-1) << 1); *q=(*p); *(q+1)=(*(p)); for (x=1; x < (ssize_t) width; x++) { p--; q-=2; *q=(*p); *(q+1)=(unsigned char) ((((size_t) *p)+((size_t) *(p+1))+1) >> 1); } } for (y=0; y < (ssize_t) (height-1); y++) { p=pixels+((size_t) y << 1)*scaled_width; q=p+scaled_width; r=q+scaled_width; for (x=0; x < (ssize_t) (width-1); x++) { *q=(unsigned char) ((((size_t) *p)+((size_t) *r)+1) >> 1); *(q+1)=(unsigned char) ((((size_t) *p)+((size_t) *(p+2))+ ((size_t) *r)+((size_t) *(r+2))+2) >> 2); q+=2; p+=2; r+=2; } *q++=(unsigned char) ((((size_t) *p++)+((size_t) *r++)+1) >> 1); *q++=(unsigned char) ((((size_t) *p++)+((size_t) *r++)+1) >> 1); } p=pixels+(2*height-2)*scaled_width; q=pixels+(2*height-1)*scaled_width; (void) memcpy(q,p,(size_t) (2*width)); } static Image *ReadPCDImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define ThrowPCDException(exception,message) \ { \ if (header != (unsigned char *) NULL) \ header=(unsigned char *) RelinquishMagickMemory(header); \ if (luma != (unsigned char *) NULL) \ luma=(unsigned char *) RelinquishMagickMemory(luma); \ if (chroma2 != (unsigned char *) NULL) \ chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); \ if (chroma1 != (unsigned char *) NULL) \ chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); \ ThrowReaderException((exception),(message)); \ } Image *image; MagickBooleanType status; MagickOffsetType offset; MagickSizeType number_pixels; register ssize_t i, y; register Quantum *q; register unsigned char *c1, *c2, *yy; size_t height, number_images, rotate, scene, width; ssize_t count, x; unsigned char *chroma1, *chroma2, *header, *luma; unsigned int overview; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Determine if this a PCD file. */ header=(unsigned char *) AcquireQuantumMemory(0x800,3UL*sizeof(*header)); if (header == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); chroma1=(unsigned char *) NULL; chroma2=(unsigned char *) NULL; luma=(unsigned char *) NULL; count=ReadBlob(image,3*0x800,header); if (count != (3*0x800)) ThrowPCDException(CorruptImageError,"ImproperImageHeader"); overview=LocaleNCompare((char *) header,"PCD_OPA",7) == 0; if ((LocaleNCompare((char *) header+0x800,"PCD",3) != 0) && (overview == 0)) ThrowPCDException(CorruptImageError,"ImproperImageHeader"); rotate=header[0x0e02] & 0x03; number_images=((header[10] << 8) | header[11]) & 0xffff; header=(unsigned char *) RelinquishMagickMemory(header); if ((overview != 0) && (AcquireMagickResource(ListLengthResource,number_images) == MagickFalse)) ThrowPCDException(ResourceLimitError,"ListLengthExceedsLimit"); /* Determine resolution by scene specification. 
*/ if ((image->columns == 0) || (image->rows == 0)) scene=3; else { width=192; height=128; for (scene=1; scene < 6; scene++) { if ((width >= image->columns) && (height >= image->rows)) break; width<<=1; height<<=1; } } if (image_info->number_scenes != 0) scene=(size_t) MagickMin(image_info->scene,6); if (overview != 0) scene=1; /* Initialize image structure. */ width=192; height=128; for (i=1; i < (ssize_t) MagickMin(scene,3); i++) { width<<=1; height<<=1; } image->columns=width; image->rows=height; image->depth=8; for ( ; i < (ssize_t) scene; i++) { image->columns<<=1; image->rows<<=1; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate luma and chroma memory. */ number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != (size_t) number_pixels) ThrowPCDException(ResourceLimitError,"MemoryAllocationFailed"); chroma1=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*chroma1)); chroma2=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*chroma2)); luma=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*luma)); if ((chroma1 == (unsigned char *) NULL) || (chroma2 == (unsigned char *) NULL) || (luma == (unsigned char *) NULL)) ThrowPCDException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(chroma1,0,(image->columns+1UL)*image->rows* 10*sizeof(*chroma1)); (void) memset(chroma2,0,(image->columns+1UL)*image->rows* 10*sizeof(*chroma2)); (void) memset(luma,0,(image->columns+1UL)*image->rows* 10*sizeof(*luma)); /* Advance to image data. */ offset=93; if (overview != 0) offset=2; else if (scene == 2) offset=20; else if (scene <= 1) offset=1; for (i=0; i < (ssize_t) (offset*0x800); i++) if (ReadBlobByte(image) == EOF) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); if (overview != 0) { MagickProgressMonitor progress_monitor; register ssize_t j; /* Read thumbnails from overview image. */ for (j=1; j <= (ssize_t) number_images; j++) { progress_monitor=SetImageProgressMonitor(image, (MagickProgressMonitor) NULL,image->client_data); (void) FormatLocaleString(image->filename,MagickPathExtent, "images/img%04ld.pcd",(long) j); (void) FormatLocaleString(image->magick_filename,MagickPathExtent, "images/img%04ld.pcd",(long) j); image->scene=(size_t) j; image->columns=width; image->rows=height; image->depth=8; yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) height; y+=2) { count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width >> 1,c1); c1+=image->columns; count=ReadBlob(image,width >> 1,c2); c2+=image->columns; if (EOFBlob(image) != MagickFalse) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); } Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma1); Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma2); /* Transfer luminance and chrominance channels. 
*/ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*yy++),q); SetPixelGreen(image,ScaleCharToQuantum(*c1++),q); SetPixelBlue(image,ScaleCharToQuantum(*c2++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } image->colorspace=YCCColorspace; if (LocaleCompare(image_info->magick,"PCDS") == 0) (void) SetImageColorspace(image,sRGBColorspace,exception); if (EOFBlob(image) != MagickFalse) break; if (j < (ssize_t) number_images) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); } (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,j-1,number_images); if (status == MagickFalse) break; } } chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); luma=(unsigned char *) RelinquishMagickMemory(luma); if (status == MagickFalse) return(DestroyImageList(image)); return(OverviewImage(image_info,GetFirstImageInList(image),exception)); } /* Read interleaved image. */ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) height; y+=2) { count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width >> 1,c1); c1+=image->columns; count=ReadBlob(image,width >> 1,c2); c2+=image->columns; if (EOFBlob(image) != MagickFalse) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); } if (scene >= 4) { /* Recover luminance deltas for 1536x1024 image. */ Upsample(768,512,image->columns,luma); Upsample(384,256,image->columns,chroma1); Upsample(384,256,image->columns,chroma2); image->rows=1024; for (i=0; i < (4*0x800); i++) (void) ReadBlobByte(image); status=DecodeImage(image,luma,chroma1,chroma2,exception); if ((scene >= 5) && status) { /* Recover luminance deltas for 3072x2048 image. */ Upsample(1536,1024,image->columns,luma); Upsample(768,512,image->columns,chroma1); Upsample(768,512,image->columns,chroma2); image->rows=2048; offset=TellBlob(image)/0x800+12; offset=SeekBlob(image,offset*0x800,SEEK_SET); status=DecodeImage(image,luma,chroma1,chroma2,exception); if ((scene >= 6) && (status != MagickFalse)) { /* Recover luminance deltas for 6144x4096 image (vaporware). */ Upsample(3072,2048,image->columns,luma); Upsample(1536,1024,image->columns,chroma1); Upsample(1536,1024,image->columns,chroma2); image->rows=4096; } } } Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma1); Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma2); /* Transfer luminance and chrominance channels. 
*/ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*yy++),q); SetPixelGreen(image,ScaleCharToQuantum(*c1++),q); SetPixelBlue(image,ScaleCharToQuantum(*c2++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); luma=(unsigned char *) RelinquishMagickMemory(luma); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); if (image_info->ping == MagickFalse) if ((rotate == 1) || (rotate == 3)) { double degrees; Image *rotate_image; /* Rotate image. */ degrees=rotate == 1 ? -90.0 : 90.0; rotate_image=RotateImage(image,degrees,exception); if (rotate_image != (Image *) NULL) { image=DestroyImage(image); image=rotate_image; } } /* Set CCIR 709 primaries with a D65 white point. */ image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->gamma=1.000f/2.200f; image->colorspace=YCCColorspace; if (LocaleCompare(image_info->magick,"PCDS") == 0) (void) SetImageColorspace(image,sRGBColorspace,exception); if (image_info->scene != 0) for (i=0; i < (ssize_t) image_info->scene; i++) AppendImageToList(&image,CloneImage(image,0,0,MagickTrue,exception)); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPCDImage() adds attributes for the PCD image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPCDImage method is: % % size_t RegisterPCDImage(void) % */ ModuleExport size_t RegisterPCDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PCD","PCD","Photo CD"); entry->decoder=(DecodeImageHandler *) ReadPCDImage; entry->encoder=(EncodeImageHandler *) WritePCDImage; entry->magick=(IsImageFormatHandler *) IsPCD; entry->flags^=CoderAdjoinFlag; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PCD","PCDS","Photo CD"); entry->decoder=(DecodeImageHandler *) ReadPCDImage; entry->encoder=(EncodeImageHandler *) WritePCDImage; entry->flags^=CoderAdjoinFlag; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPCDImage() removes format registrations made by the % PCD module from the list of supported formats. % % The format of the UnregisterPCDImage method is: % % UnregisterPCDImage(void) % */ ModuleExport void UnregisterPCDImage(void) { (void) UnregisterMagickInfo("PCD"); (void) UnregisterMagickInfo("PCDS"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePCDImage() writes an image in the Photo CD encoded image format. % % The format of the WritePCDImage method is: % % MagickBooleanType WritePCDImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePCDTile(Image *image,const char *page_geometry, const size_t tile_columns,const size_t tile_rows,ExceptionInfo *exception) { GeometryInfo geometry_info; Image *downsample_image, *tile_image; MagickBooleanType status; MagickStatusType flags; RectangleInfo geometry; register const Quantum *p, *q; register ssize_t i, x; ssize_t y; /* Scale image to tile size. */ SetGeometry(image,&geometry); (void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); if ((geometry.width % 2) != 0) geometry.width--; if ((geometry.height % 2) != 0) geometry.height--; tile_image=ResizeImage(image,geometry.width,geometry.height,TriangleFilter, exception); if (tile_image == (Image *) NULL) return(MagickFalse); flags=ParseGeometry(page_geometry,&geometry_info); geometry.width=(size_t) geometry_info.rho; geometry.height=(size_t) geometry_info.sigma; if ((flags & SigmaValue) == 0) geometry.height=geometry.width; if ((tile_image->columns != geometry.width) || (tile_image->rows != geometry.height)) { Image *bordered_image; RectangleInfo border_info; /* Put a border around the image. 
*/ border_info.width=(geometry.width-tile_image->columns+1) >> 1; border_info.height=(geometry.height-tile_image->rows+1) >> 1; bordered_image=BorderImage(tile_image,&border_info,image->compose, exception); if (bordered_image == (Image *) NULL) return(MagickFalse); tile_image=DestroyImage(tile_image); tile_image=bordered_image; } if ((tile_image->columns != tile_columns) || (tile_image->rows != tile_rows)) { Image *resize_image; resize_image=ResizeImage(tile_image,tile_columns,tile_rows, tile_image->filter,exception); if (resize_image != (Image *) NULL) { tile_image=DestroyImage(tile_image); tile_image=resize_image; } } (void) TransformImageColorspace(tile_image,YCCColorspace,exception); downsample_image=ResizeImage(tile_image,tile_image->columns/2, tile_image->rows/2,TriangleFilter,exception); if (downsample_image == (Image *) NULL) return(MagickFalse); /* Write tile to PCD file. */ for (y=0; y < (ssize_t) tile_image->rows; y+=2) { p=GetVirtualPixels(tile_image,0,y,tile_image->columns,2,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) (tile_image->columns << 1); x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(tile_image,p))); p+=GetPixelChannels(tile_image); } q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) downsample_image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar( GetPixelGreen(tile_image,q))); q+=GetPixelChannels(tile_image); } q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) downsample_image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar( GetPixelBlue(tile_image,q))); q+=GetPixelChannels(tile_image); } status=SetImageProgress(image,SaveImageTag,y,tile_image->rows); if (status == MagickFalse) break; } for (i=0; i < 0x800; i++) (void) WriteBlobByte(image,'\0'); downsample_image=DestroyImage(downsample_image); tile_image=DestroyImage(tile_image); return(MagickTrue); } static MagickBooleanType WritePCDImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { Image *pcd_image; MagickBooleanType status; register ssize_t i; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); pcd_image=image; if (image->columns < image->rows) { Image *rotate_image; /* Rotate portrait to landscape. */ rotate_image=RotateImage(image,90.0,exception); if (rotate_image == (Image *) NULL) return(MagickFalse); pcd_image=rotate_image; DestroyBlob(rotate_image); pcd_image->blob=ReferenceBlob(image->blob); } /* Open output image file. */ status=OpenBlob(image_info,pcd_image,WriteBinaryBlobMode,exception); if (status == MagickFalse) { if (pcd_image != image) pcd_image=DestroyImage(pcd_image); return(status); } if (IssRGBCompatibleColorspace(pcd_image->colorspace) == MagickFalse) (void) TransformImageColorspace(pcd_image,sRGBColorspace,exception); /* Write PCD image header. 
*/ for (i=0; i < 32; i++) (void) WriteBlobByte(pcd_image,0xff); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x0e); for (i=0; i < 8; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x01); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x05); for (i=0; i < 8; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x0A); for (i=0; i < 36; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x01); for (i=0; i < 1944; i++) (void) WriteBlobByte(pcd_image,'\0'); (void) WriteBlob(pcd_image,7,(const unsigned char *) "PCD_IPI"); (void) WriteBlobByte(pcd_image,0x06); for (i=0; i < 1530; i++) (void) WriteBlobByte(pcd_image,'\0'); if (image->columns < image->rows) (void) WriteBlobByte(pcd_image,'\1'); else (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < (3*0x800-1539); i++) (void) WriteBlobByte(pcd_image,'\0'); /* Write PCD tiles. */ status=WritePCDTile(pcd_image,"768x512>",192,128,exception); status=WritePCDTile(pcd_image,"768x512>",384,256,exception); status=WritePCDTile(pcd_image,"768x512>",768,512,exception); (void) CloseBlob(pcd_image); if (pcd_image != image) pcd_image=DestroyImage(pcd_image); return(status); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_1410_0
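Inside DecodeImage() above, each decoded Huffman key is an 8-bit signed correction that is added to the running prediction and clamped to the 0..255 byte range. A minimal sketch of just that step, with no MagickCore dependency; the sample inputs are arbitrary:

/*
 * Sketch of the clamped signed-delta update performed per pixel by the
 * PCD Huffman decoder: keys >= 128 encode negative corrections.
 */
#include <stdio.h>

static unsigned char apply_pcd_delta(unsigned char prediction, unsigned char key)
{
    long quantum;

    if (key < 128)
        quantum = (long) prediction + key;
    else
        quantum = (long) prediction + key - 256;
    if (quantum < 0)
        return 0;
    if (quantum > 255)
        return 255;
    return (unsigned char) quantum;
}

int main(void)
{
    printf("%u\n", apply_pcd_delta(100, 10));   /* 110 */
    printf("%u\n", apply_pcd_delta(100, 250));  /* 250-256 = -6 -> 94 */
    printf("%u\n", apply_pcd_delta(3, 200));    /* would go negative, clamps to 0 */
    printf("%u\n", apply_pcd_delta(250, 20));   /* 270, clamps to 255 */
    return 0;
}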
crossvul-cpp_data_good_945_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO GGGGG RRRR IIIII FFFFF Y Y % % MM MM O O G R R I F Y Y % % M M M O O G GGG RRRR I FFF Y % % M M O O G G R R I F Y % % M M OOO GGGG R R IIIII F Y % % % % % % MagickWand Module Methods % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use the mogrify program to resize an image, blur, crop, despeckle, dither, % draw on, flip, join, re-sample, and much more. This tool is similiar to % convert except that the original image file is overwritten (unless you % change the file suffix with the -format option) with any changes you % request. % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/mogrify-private.h" #include "magick/blob-private.h" #include "magick/color-private.h" #include "magick/image-private.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/thread-private.h" #include "magick/string-private.h" #include "magick/timer-private.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_HAVE_UTIME_H) #include <utime.h> #endif /* Define declarations. */ #define UndefinedCompressionQuality 0UL /* Constant declaration. */ static const char MogrifyBackgroundColor[] = "#fff", /* white */ MogrifyBorderColor[] = "#dfdfdf", /* sRGB gray */ MogrifyMatteColor[] = "#bdbdbd"; /* slightly darker gray */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o m m a n d G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickCommandGenesis() applies image processing options to an image as % prescribed by command line options. % % The format of the MagickCommandGenesis method is: % % MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, % MagickCommand command,int argc,char **argv,char **metadata, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o command: Choose from ConvertImageCommand, IdentifyImageCommand, % MogrifyImageCommand, CompositeImageCommand, CompareImageCommand, % ConjureImageCommand, StreamImageCommand, ImportImageCommand, % DisplayImageCommand, or AnimateImageCommand. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. 
% */ WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, MagickCommand command,int argc,char **argv,char **metadata, ExceptionInfo *exception) { char *option; double duration, serial; MagickBooleanType concurrent, regard_warnings, status; register ssize_t i; size_t iterations, number_threads; ssize_t n; (void) setlocale(LC_ALL,""); (void) setlocale(LC_NUMERIC,"C"); concurrent=MagickFalse; duration=(-1.0); iterations=1; status=MagickTrue; regard_warnings=MagickFalse; for (i=1; i < (ssize_t) (argc-1); i++) { option=argv[i]; if ((strlen(option) == 1) || ((*option != '-') && (*option != '+'))) continue; if (LocaleCompare("bench",option+1) == 0) iterations=StringToUnsignedLong(argv[++i]); if (LocaleCompare("concurrent",option+1) == 0) concurrent=MagickTrue; if (LocaleCompare("debug",option+1) == 0) (void) SetLogEventMask(argv[++i]); if (LocaleCompare("distribute-cache",option+1) == 0) { DistributePixelCacheServer(StringToInteger(argv[++i]),exception); exit(0); } if (LocaleCompare("duration",option+1) == 0) duration=StringToDouble(argv[++i],(char **) NULL); if (LocaleCompare("regard-warnings",option+1) == 0) regard_warnings=MagickTrue; } if (iterations == 1) { status=command(image_info,argc,argv,metadata,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } return(status); } number_threads=GetOpenMPMaximumThreads(); serial=0.0; for (n=1; n <= (ssize_t) number_threads; n++) { double e, parallel, user_time; TimerInfo *timer; (void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n); timer=AcquireTimerInfo(); if (concurrent == MagickFalse) { for (i=0; i < (ssize_t) iterations; i++) { if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,metadata,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } } } else { SetOpenMPNested(1); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp parallel for shared(status) #endif for (i=0; i < (ssize_t) iterations; i++) { if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,metadata,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_MagickCommandGenesis) #endif { if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } } } } user_time=GetUserTime(timer); parallel=GetElapsedTime(timer); e=1.0; if (n == 1) serial=parallel; else e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/ (double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n); (void) FormatLocaleFile(stderr, " Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n", (double) n,(double) 
iterations,(double) iterations/parallel,e,user_time, (unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel, 60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5)); timer=DestroyTimerInfo(timer); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImage() applies simple single image processing options to a single % image that may be part of a large list, but also handles any 'region' % image handling. % % The image in the list may be modified in three different ways... % % * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), % * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) % * replace by a list of images (only the -separate option!) % % In each case the result is returned into the list, and a pointer to the % modified image (last image added if replaced by a list of images) is % returned. % % ASIDE: The -crop is present but restricted to non-tile single image crops % % This means if all the images are being processed (such as by % MogrifyImages(), next image to be processed will be as per the pointer % (*image)->next. Also the image list may grow as a result of some specific % operations but as images are never merged or deleted, it will never shrink % in length. Typically the list will remain the same length. % % WARNING: As the image pointed to may be replaced, the first image in the % list may also change. GetFirstImageInList() should be used by caller if % they wish return the Image pointer to the first image in list. % % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, % const char **argv,Image **image) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MaxTextExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; /* Read an image into a image cache if not already present. Return the image that is in the cache under that filename. 
*/ (void) FormatLocaleString(key,MaxTextExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); (void) CopyMagickString(read_info->filename,path,MaxTextExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } static inline MagickBooleanType IsPathWritable(const char *path) { if (IsPathAccessible(path) == MagickFalse) return(MagickFalse); if (access_utf8(path,W_OK) != 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MaxTextExtent], tag[MaxTextExtent]; const char *locale_message; register char *p; wand_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MaxTextExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MaxTextExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } static Image *SparseColorOption(const Image *image,const ChannelType channel, const SparseColorMethod method,const char *arguments, const MagickBooleanType color_from_image,ExceptionInfo *exception) { ChannelType channels; char token[MaxTextExtent]; const char *p; double *sparse_arguments; Image *sparse_image; MagickBooleanType error; MagickPixelPacket color; register size_t x; size_t number_arguments, number_colors; /* SparseColorOption() parses the complex -sparse-color argument into an array of floating point values then calls SparseColorImage(). The argument is a complex mix of floating-point pixel coordinates and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to the image and add up the number of color channels.
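For example, a plain RGB image needs three color values per control point, while a CMYK image with an alpha channel needs five.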
*/ channels=channel; if (image->colorspace != CMYKColorspace) channels=(ChannelType) (channels & ~IndexChannel); /* no index channel */ if (image->matte == MagickFalse) channels=(ChannelType) (channels & ~OpacityChannel); /* no alpha channel */ number_colors=0; if ((channels & RedChannel) != 0) number_colors++; if ((channels & GreenChannel) != 0) number_colors++; if ((channels & BlueChannel) != 0) number_colors++; if ((channels & IndexChannel) != 0) number_colors++; if ((channels & OpacityChannel) != 0) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) { if ( color_from_image ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color arg given, when colors are coming from image"); return( (Image *) NULL); } x += number_colors; /* color argument */ } else { x++; /* floating point argument */ } } error=MagickTrue; if ( color_from_image ) { /* just the control points are being given */ error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse; number_arguments=(x/2)*(2+number_colors); } else { /* control points and color values */ error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse; number_arguments=x; } if ( error ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, " MemoryAllocationFailed\n""%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color found, instead of X-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color found, instead of Y-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color values for this control point */ #if 0 if ( (color_from_image ) { /* get color from image */ /* HOW??? 
*/ } else #endif { /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryMagickColor(token,&color,exception); if ( channels & RedChannel ) sparse_arguments[x++] = QuantumScale*color.red; if ( channels & GreenChannel ) sparse_arguments[x++] = QuantumScale*color.green; if ( channels & BlueChannel ) sparse_arguments[x++] = QuantumScale*color.blue; if ( channels & IndexChannel ) sparse_arguments[x++] = QuantumScale*color.index; if ( channels & OpacityChannel ) sparse_arguments[x++] = QuantumScale*color.opacity; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ( channels & RedChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & GreenChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & BlueChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & IndexChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & OpacityChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } } if ( number_arguments != x && !error ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, " InvalidArgument","`%s': %s","sparse-color","Argument Parsing Error"); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( (Image *) NULL); } if ( error ) return( (Image *) NULL); /* Call the Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,channels,method,number_arguments, sparse_arguments,exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, const char **argv,Image **image,ExceptionInfo *exception) { ChannelType channel; const char *format, *option; DrawInfo *draw_info; GeometryInfo geometry_info; Image *region_image; ImageInfo *mogrify_info; MagickStatusType status; MagickPixelPacket fill; MagickStatusType flags; QuantizeInfo *quantize_info; RectangleInfo geometry, region_geometry; register ssize_t i; /* Initialize method variables. 
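The image info is cloned and fresh draw/quantize info acquired so that per-image option processing never modifies the caller's settings; the current -channel and -format settings are captured here as well.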
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (argc < 0) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL); quantize_info=AcquireQuantizeInfo(mogrify_info); SetGeometryInfo(&geometry_info); GetMagickPixelPacket(*image,&fill); SetMagickPixelPacket(*image,&(*image)->background_color,(IndexPacket *) NULL, &fill); channel=mogrify_info->channel; format=GetImageOption(mogrify_info,"format"); SetGeometry(*image,&region_geometry); region_image=NewImageList(); /* Transmogrify the image. */ for (i=0; i < (ssize_t) argc; i++) { Image *mogrify_image; ssize_t count; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option), 0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); mogrify_image=(Image *) NULL; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { /* Adaptive blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* Adaptive resize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=AdaptiveResizeImage(*image,geometry.width, geometry.height,exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { /* Adaptive sharpen image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveSharpenImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("affine",option+1) == 0) { /* Affine matrix. */ if (*option == '+') { GetAffineMatrix(&draw_info->affine); break; } (void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception); break; } if (LocaleCompare("alpha",option+1) == 0) { AlphaChannelType alpha_type; (void) SyncImageSettings(mogrify_info,*image); alpha_type=(AlphaChannelType) ParseCommandOption(MagickAlphaOptions, MagickFalse,argv[i+1]); (void) SetImageAlphaChannel(*image,alpha_type); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char *text, geometry[MaxTextExtent]; /* Annotate image. 
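The geometry argument supplies the rotation and shear angles in rho/sigma and the text offset in xi/psi; the annotation text itself is run through InterpretImageProperties() before drawing.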
*/ (void) SyncImageSettings(mogrify_info,*image); SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; text=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (text == (char *) NULL) break; (void) CloneString(&draw_info->text,text); text=DestroyString(text); (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&draw_info->geometry,geometry); draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.ry=(-sin(DegreesToRadians( fmod(geometry_info.sigma,360.0)))); draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(*image,draw_info); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("antialias",option+1) == 0) { draw_info->stroke_antialias=(*option == '-') ? MagickTrue : MagickFalse; draw_info->text_antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("auto-gamma",option+1) == 0) { /* Auto Adjust Gamma of image based on its mean */ (void) SyncImageSettings(mogrify_info,*image); (void) AutoGammaImageChannel(*image,channel); break; } if (LocaleCompare("auto-level",option+1) == 0) { /* Perfectly Normalize (max/min stretch) the image */ (void) SyncImageSettings(mogrify_info,*image); (void) AutoLevelImageChannel(*image,channel); break; } if (LocaleCompare("auto-orient",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); mogrify_image=AutoOrientImage(*image,(*image)->orientation, exception); break; } break; } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { /* Black threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) BlackThresholdImageChannel(*image,channel,argv[i+1], exception); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { /* Blue shift image. */ (void) SyncImageSettings(mogrify_info,*image); geometry_info.rho=1.5; if (*option == '-') flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("blur",option+1) == 0) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=BlurImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("border",option+1) == 0) { /* Surround image with a border of solid color. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=BorderImage(*image,&geometry,exception); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase(MogrifyBorderColor, &draw_info->border_color,exception); break; } (void) QueryColorDatabase(argv[i+1],&draw_info->border_color, exception); break; } if (LocaleCompare("box",option+1) == 0) { (void) QueryColorDatabase(argv[i+1],&draw_info->undercolor, exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; GeometryInfo geometry_info; MagickStatusType flags; /* Brightness / contrast image. 
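The argument is parsed as brightness[xcontrast]; contrast defaults to 0 when omitted.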
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImageChannel(*image,channel,brightness, contrast); InheritException(exception,&(*image)->exception); break; } break; } case 'c': { if (LocaleCompare("canny",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10; if ((flags & PsiValue) == 0) geometry_info.psi=0.30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } mogrify_image=CannyEdgeImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Color correct with a color decision list. */ (void) SyncImageSettings(mogrify_info,*image); color_correction_collection=FileToString(argv[i+1],~0UL,exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(*image,color_correction_collection); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') channel=DefaultChannels; else channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("charcoal",option+1) == 0) { /* Charcoal image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=CharcoalImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("chop",option+1) == 0) { /* Chop the image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ChopImage(*image,&geometry,exception); break; } if (LocaleCompare("clamp",option+1) == 0) { /* Clamp image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ClampImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) SetImageClipMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } (void) ClipImage(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { CacheView *mask_view; Image *mask_image; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t y; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } /* Set the image mask. FUTURE: This Should Be a SetImageAlphaChannel() call, Or two. 
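For now the mask image's intensity (or its existing opacity) is copied into every channel so it acts as a grayscale matte before being attached with SetImageClipMask().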
*/ mask_image=GetImageCache(mogrify_info,argv[i+1],exception); if (mask_image == (Image *) NULL) break; if (SetImageStorageClass(mask_image,DirectClass) == MagickFalse) return(MagickFalse); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) mask_image->rows; y++) { q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) mask_image->columns; x++) { if (mask_image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(GetPixelIntensity(mask_image, q))); SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) break; } mask_view=DestroyCacheView(mask_view); mask_image->matte=MagickTrue; (void) SetImageClipMask(*image,mask_image); mask_image=DestroyImage(mask_image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("colorize",option+1) == 0) { /* Colorize the image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=ColorizeImage(*image,argv[i+1],draw_info->fill, exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image); kernel=AcquireKernelInfo(argv[i+1]); if (kernel == (KernelInfo *) NULL) break; mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. */ (void) SyncImageSettings(mogrify_info,*image); quantize_info->number_colors=StringToUnsignedLong(argv[i+1]); if (quantize_info->number_colors == 0) break; if (((*image)->storage_class == DirectClass) || (*image)->colors > quantize_info->number_colors) (void) QuantizeImage(quantize_info,*image); else (void) CompressImageColormap(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { ColorspaceType colorspace; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) TransformImageColorspace(*image,sRGBColorspace); InheritException(exception,&(*image)->exception); break; } colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) TransformImageColorspace(*image,colorspace); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("connected-components",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); mogrify_image=ConnectedComponentsImage(*image, StringToInteger(argv[i+1]),exception); break; } if (LocaleCompare("contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; /* Contrast stretch image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? 
geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } white_point=(MagickRealType) (*image)->columns*(*image)->rows- white_point; (void) ContrastStretchImageChannel(*image,channel,black_point, white_point); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; size_t extent; (void) SyncImageSettings(mogrify_info,*image); kernel_info=AcquireKernelInfo(argv[i+1]); if (kernel_info == (KernelInfo *) NULL) break; extent=kernel_info->width*kernel_info->height; gamma=0.0; for (j=0; j < (ssize_t) extent; j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) extent; j++) kernel_info->values[j]*=gamma; mogrify_image=MorphologyImage(*image,CorrelateMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* Crop a image to a smaller size */ (void) SyncImageSettings(mogrify_info,*image); #if 0 flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (((geometry.width != 0) || (geometry.height != 0)) && ((flags & XValue) == 0) && ((flags & YValue) == 0)) break; #endif #if 0 mogrify_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception); mogrify_image->next = mogrify_image->previous = (Image *) NULL; (void) TransformImage(&mogrify_image,argv[i+1],(char *) NULL); InheritException(exception,&mogrify_image->exception); #else mogrify_image=CropImageToTiles(*image,argv[i+1],exception); #endif break; } if (LocaleCompare("cycle",option+1) == 0) { /* Cycle an image colormap. */ (void) SyncImageSettings(mogrify_info,*image); (void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1])); InheritException(exception,&(*image)->exception); break; } break; } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { StringInfo *passkey; /* Decipher pixels. */ (void) SyncImageSettings(mogrify_info,*image); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyDecipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. */ (void) CloneString(&draw_info->density,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH); break; } (void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1])); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; /* Straighten the image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') threshold=40.0*QuantumRange/100.0; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=DeskewImage(*image,threshold,exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { /* Reduce the speckles within an image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=DespeckleImage(*image,exception); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&draw_info->server_name,argv[i+1]); break; } if (LocaleCompare("distort",option+1) == 0) { char *args, token[MaxTextExtent]; const char *p; DistortImageMethod method; double *arguments; register ssize_t x; size_t number_arguments; /* Distort image. 
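The first argument selects the distortion method and the second is a comma/space separated list of numeric control points; the ResizeDistortion method instead takes a single geometry that is converted into width/height arguments.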
*/ (void) SyncImageSettings(mogrify_info,*image); method=(DistortImageMethod) ParseCommandOption(MagickDistortOptions, MagickFalse,argv[i+1]); if (method == ResizeDistortion) { double resize_args[2]; /* Resize distortion. */ (void) ParseRegionGeometry(*image,argv[i+2],&geometry, exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; mogrify_image=DistortImage(*image,method,(size_t) 2, resize_args,MagickTrue,exception); break; } args=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(arguments,0,number_arguments*sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); mogrify_image=DistortImage(*image,method,number_arguments,arguments, (*option == '+') ? MagickTrue : MagickFalse,exception); arguments=(double *) RelinquishMagickMemory(arguments); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither=MagickFalse; break; } quantize_info->dither=MagickTrue; quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); if (quantize_info->dither_method == NoDitherMethod) quantize_info->dither=MagickFalse; break; } if (LocaleCompare("draw",option+1) == 0) { /* Draw image. */ (void) SyncImageSettings(mogrify_info,*image); (void) CloneString(&draw_info->primitive,argv[i+1]); (void) DrawImage(*image,draw_info); InheritException(exception,&(*image)->exception); break; } break; } case 'e': { if (LocaleCompare("edge",option+1) == 0) { /* Enhance edges in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EdgeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("emboss",option+1) == 0) { /* Gaussian embossen image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EmbossImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("encipher",option+1) == 0) { StringInfo *passkey; /* Encipher pixels. */ (void) SyncImageSettings(mogrify_info,*image); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&draw_info->encoding,argv[i+1]); break; } if (LocaleCompare("enhance",option+1) == 0) { /* Enhance image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=EnhanceImage(*image,exception); break; } if (LocaleCompare("equalize",option+1) == 0) { /* Equalize image. 
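Histogram equalization is applied to the channels selected by the current -channel setting.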
*/ (void) SyncImageSettings(mogrify_info,*image); (void) EqualizeImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*image); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+ 1.0); (void) EvaluateImageChannel(*image,channel,op,constant,exception); break; } if (LocaleCompare("extent",option+1) == 0) { /* Set the image extent. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (geometry.width == 0) geometry.width=(*image)->columns; if (geometry.height == 0) geometry.height=(*image)->rows; mogrify_image=ExtentImage(*image,&geometry,exception); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') { if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); break; } (void) CloneString(&draw_info->family,argv[i+1]); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:features"); break; } (void) SetImageArtifact(*image,"identify:features",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { ExceptionInfo *sans; GetMagickPixelPacket(*image,&fill); if (*option == '+') { (void) QueryMagickColor("none",&fill,exception); (void) QueryColorDatabase("none",&draw_info->fill,exception); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } sans=AcquireExceptionInfo(); (void) QueryMagickColor(argv[i+1],&fill,sans); status=QueryColorDatabase(argv[i+1],&draw_info->fill,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("flip",option+1) == 0) { /* Flip image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=FlipImage(*image,exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { MagickPixelPacket target; /* Floodfill image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) QueryMagickColor(argv[i+2],&target,exception); (void) FloodfillPaintImage(*image,channel,draw_info,&target, geometry.x,geometry.y,*option == '-' ? MagickFalse : MagickTrue); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("flop",option+1) == 0) { /* Flop image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=FlopImage(*image,exception); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); break; } (void) CloneString(&draw_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { format=argv[i+1]; break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; /* Surround image with an ornamental border. 
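The geometry width/height give the frame thickness while the x/y offsets supply the outer and inner bevel widths.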
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=(*image)->columns+2*frame_info.width; frame_info.height=(*image)->rows+2*frame_info.height; mogrify_image=FrameImage(*image,&frame_info,exception); break; } if (LocaleCompare("function",option+1) == 0) { char *arguments, token[MaxTextExtent]; const char *p; double *parameters; MagickFunction function; register ssize_t x; size_t number_parameters; /* Function Modify Image Values */ (void) SyncImageSettings(mogrify_info,*image); function=(MagickFunction) ParseCommandOption(MagickFunctionOptions, MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (arguments == (char *) NULL) break; p=(char *) arguments; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_parameters=(size_t) x; parameters=(double *) AcquireQuantumMemory(number_parameters, sizeof(*parameters)); if (parameters == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(parameters,0,number_parameters* sizeof(*parameters)); p=(char *) arguments; for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); parameters[x]=StringToDouble(token,(char **) NULL); } arguments=DestroyString(arguments); (void) FunctionImageChannel(*image,channel,function, number_parameters,parameters,exception); parameters=(double *) RelinquishMagickMemory(parameters); break; } break; } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { /* Gamma image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') (*image)->gamma=StringToDouble(argv[i+1],(char **) NULL); else { if (strchr(argv[i+1],',') != (char *) NULL) (void) GammaImage(*image,argv[i+1]); else (void) GammaImageChannel(*image,channel, StringToDouble(argv[i+1],(char **) NULL)); InheritException(exception,&(*image)->exception); } break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=GaussianBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset, Resize last image. 
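If the geometry contains an x/y offset it is simply recorded in the image; otherwise the image is resized to the requested width and height.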
*/ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { if ((*image)->geometry != (char *) NULL) (*image)->geometry=DestroyString((*image)->geometry); break; } flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&(*image)->geometry,argv[i+1]); else mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { draw_info->gravity=UndefinedGravity; break; } draw_info->gravity=(GravityType) ParseCommandOption( MagickGravityOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("grayscale",option+1) == 0) { PixelIntensityMethod method; (void) SyncImagesSettings(mogrify_info,*image); method=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,argv[i+1]); (void) GrayscaleImage(*image,method); InheritException(exception,&(*image)->exception); break; } break; } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]); break; } if (LocaleCompare("hough-lines",option+1) == 0) { /* Identify lines in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } break; } case 'i': { if (LocaleCompare("identify",option+1) == 0) { char *text; (void) SyncImageSettings(mogrify_info,*image); if (format == (char *) NULL) { (void) IdentifyImage(*image,stdout,mogrify_info->verbose); InheritException(exception,&(*image)->exception); break; } text=InterpretImageProperties(mogrify_info,*image,format); InheritException(exception,&(*image)->exception); if (text == (char *) NULL) break; (void) fputs(text,stdout); text=DestroyString(text); break; } if (LocaleCompare("implode",option+1) == 0) { /* Implode image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=ImplodeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interline_spacing=geometry_info.rho; break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interword_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* Resize image using 'point sampled' interpolation */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=InterpolativeResizeImage(*image,geometry.width, geometry.height,(*image)->interpolate,exception); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->kerning=geometry_info.rho; break; } if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
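When no sigma is given it defaults to rho-0.5.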
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; mogrify_image=KuwaharaImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } break; } case 'l': { if (LocaleCompare("lat",option+1) == 0) { /* Local adaptive threshold image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=AdaptiveThresholdImage(*image,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,(ssize_t) geometry_info.xi,exception); break; } if (LocaleCompare("level",option+1) == 0) { MagickRealType black_point, gamma, white_point; MagickStatusType flags; /* Parse levels. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(MagickRealType) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(MagickRealType) (QuantumRange/100.0); white_point*=(MagickRealType) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(MagickRealType) QuantumRange-black_point; if ((*option == '+') || ((flags & AspectValue) != 0)) (void) LevelizeImageChannel(*image,channel,black_point, white_point,gamma); else (void) LevelImageChannel(*image,channel,black_point,white_point, gamma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MaxTextExtent]; const char *p; MagickPixelPacket black_point, white_point; p=(const char *) argv[i+1]; GetNextToken(p,&p,MaxTextExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryMagickColor(token,&black_point,exception); else (void) QueryMagickColor("#000000",&black_point,exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MaxTextExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MaxTextExtent,token); /* Get white point color. */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryMagickColor(token,&white_point,exception); else (void) QueryMagickColor("#ffffff",&white_point,exception); } (void) LevelColorsImageChannel(*image,channel,&black_point, &white_point,*option == '+' ? 
MagickTrue : MagickFalse); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(MagickRealType) (*image)->columns*(*image)->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(MagickRealType) (*image)->columns*(*image)->rows- black_point; (void) LinearStretchImage(*image,black_point,white_point); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("linewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* Liquid rescale image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; mogrify_image=LiquidRescaleImage(*image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { MagickStatusType flags; (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; mogrify_image=LocalContrastImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("lowlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { /* Double image size. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=MagnifyImage(*image,exception); break; } if (LocaleCompare("map",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image); InheritException(exception,&(*image)->exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,mask); mask=DestroyImage(mask); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("matte",option+1) == 0) { (void) SetImageAlphaChannel(*image,(*option == '-') ? SetAlphaChannel : DeactivateAlphaChannel ); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { /* Delineate arbitrarily shaped clusters in the image. 
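Arguments are a window width/height (the height defaults to the width) plus a color distance, which defaults to 10% of QuantumRange and may be given as a percentage.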
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } if (LocaleCompare("median",option+1) == 0) { /* Median filter image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,MedianStatistic, (size_t) geometry_info.rho,(size_t) geometry_info.rho,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* Mode image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,ModeStatistic, (size_t) geometry_info.rho,(size_t) geometry_info.rho,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ModulateImage(*image,argv[i+1]); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("moments",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:moments"); break; } (void) SetImageArtifact(*image,"identify:moments",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { if (*option == '+') { (void) SetImageProgressMonitor(*image, (MagickProgressMonitor) NULL,(void *) NULL); break; } (void) SetImageProgressMonitor(*image,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) SetImageType(*image,BilevelType); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MaxTextExtent]; const char *p; KernelInfo *kernel; MorphologyMethod method; ssize_t iterations; /* Morphological Image Operation */ (void) SyncImageSettings(mogrify_info,*image); p=argv[i+1]; GetNextToken(p,&p,MaxTextExtent,token); method=(MorphologyMethod) ParseCommandOption( MagickMorphologyOptions,MagickFalse,token); iterations=1L; GetNextToken(p,&p,MaxTextExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MaxTextExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(argv[i+2]); if (kernel == (KernelInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnabletoParseKernel","morphology"); status=MagickFalse; break; } mogrify_image=MorphologyImageChannel(*image,channel,method, iterations,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { /* Motion blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=MotionBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception); break; } break; } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) NegateImageChannel(*image,channel,*option == '+' ? 
MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("noise",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '-') { flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImageChannel(*image,channel, NonpeakStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); } else { NoiseType noise; noise=(NoiseType) ParseCommandOption(MagickNoiseOptions, MagickFalse,argv[i+1]); mogrify_image=AddNoiseImageChannel(*image,channel,noise, exception); } break; } if (LocaleCompare("normalize",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) NormalizeImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } break; } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { MagickPixelPacket target; (void) SyncImageSettings(mogrify_info,*image); (void) QueryMagickColor(argv[i+1],&target,exception); (void) OpaquePaintImageChannel(*image,channel,&target,&fill, *option == '-' ? MagickFalse : MagickTrue); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) OrderedPosterizeImageChannel(*image,channel,argv[i+1], exception); break; } break; } case 'p': { if (LocaleCompare("paint",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=OilPaintImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("pen",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase("none",&draw_info->fill,exception); break; } (void) QueryColorDatabase(argv[i+1],&draw_info->fill,exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { /* Perceptible image. */ (void) SyncImageSettings(mogrify_info,*image); (void) PerceptibleImageChannel(*image,channel,StringToDouble( argv[i+1],(char **) NULL)); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') (void) ParseGeometry("12",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("polaroid",option+1) == 0) { double angle; RandomInfo *random_info; /* Simulate a Polaroid picture. */ (void) SyncImageSettings(mogrify_info,*image); random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); if (*option == '-') { SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); angle=geometry_info.rho; } mogrify_image=PolaroidImage(*image,draw_info,angle,exception); break; } if (LocaleCompare("posterize",option+1) == 0) { /* Posterize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]), quantize_info->dither); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("preview",option+1) == 0) { PreviewType preview_type; /* Preview image. 
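PreviewImage() renders a grid of variations of the selected effect so a suitable setting can be chosen.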
*/ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') preview_type=UndefinedPreview; else preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); mogrify_image=PreviewImage(*image,preview_type,exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a profile from the image. */ (void) ProfileImage(*image,argv[i+1],(const unsigned char *) NULL,0,MagickTrue); InheritException(exception,&(*image)->exception); break; } /* Associate a profile with the image. */ profile_info=CloneImageInfo(mogrify_info); profile=GetImageProfile(*image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,argv[i+1],exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *profile; profile_info=CloneImageInfo(mogrify_info); (void) CopyMagickString(profile_info->filename,argv[i+1], MaxTextExtent); profile=FileToStringInfo(profile_info->filename,~0UL,exception); if (profile != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,exception); (void) ProfileImage(*image,profile_info->magick, GetStringInfoDatum(profile),(size_t) GetStringInfoLength(profile),MagickFalse); profile=DestroyStringInfo(profile); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(*image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),MagickFalse); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } break; } case 'q': { if (LocaleCompare("quantize",option+1) == 0) { if (*option == '+') { quantize_info->colorspace=UndefinedColorspace; break; } quantize_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); break; } break; } case 'r': { if (LocaleCompare("radial-blur",option+1) == 0 || LocaleCompare("rotational-blur",option+1) == 0) { /* Radial blur image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=RotationalBlurImageChannel(*image,channel, StringToDouble(argv[i+1],(char **) NULL),exception); break; } if (LocaleCompare("raise",option+1) == 0) { /* Surround image with a raise of solid color. */ flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) RandomThresholdImageChannel(*image,channel,argv[i+1], exception); break; } if (LocaleCompare("recolor",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image); kernel=AcquireKernelInfo(argv[i+1]); if (kernel == (KernelInfo *) NULL) break; mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("region",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (region_image != (Image *) NULL) { /* Composite region. */ (void) CompositeImage(region_image,region_image->matte != MagickFalse ? 
CopyCompositeOp : OverCompositeOp,*image, region_geometry.x,region_geometry.y); InheritException(exception,&region_image->exception); *image=DestroyImage(*image); *image=region_image; region_image=(Image *) NULL; } if (*option == '+') break; /* Apply transformations to a selected region of the image. */ (void) ParseGravityGeometry(*image,argv[i+1],&region_geometry, exception); mogrify_image=CropImage(*image,&region_geometry,exception); if (mogrify_image == (Image *) NULL) break; region_image=(*image); *image=mogrify_image; mogrify_image=(Image *) NULL; break; } if (LocaleCompare("render",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); draw_info->render=(*option == '+') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("remap",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image); InheritException(exception,&(*image)->exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') { (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); break; } (void) ResetImagePage(*image,argv[i+1]); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("resample",option+1) == 0) { /* Resample image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ResampleImage(*image,geometry_info.rho, geometry_info.sigma,(*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("resize",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("roll",option+1) == 0) { /* Roll image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) (*image)->columns/100.0; geometry.y*=(double) (*image)->rows/100.0; } mogrify_image=RollImage(*image,geometry.x,geometry.y,exception); break; } if (LocaleCompare("rotate",option+1) == 0) { char *geometry; /* Check for conditional image rotation. */ (void) SyncImageSettings(mogrify_info,*image); if (strchr(argv[i+1],'>') != (char *) NULL) if ((*image)->columns <= (*image)->rows) break; if (strchr(argv[i+1],'<') != (char *) NULL) if ((*image)->columns >= (*image)->rows) break; /* Rotate image. */ geometry=ConstantString(argv[i+1]); (void) SubstituteString(&geometry,">",""); (void) SubstituteString(&geometry,"<",""); (void) ParseGeometry(geometry,&geometry_info); geometry=DestroyString(geometry); mogrify_image=RotateImage(*image,geometry_info.rho,exception); break; } break; } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* Sample image with pixel replication. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SampleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* Resize image. 
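Unlike -resize, ScaleImage() uses fast pixel averaging/replication rather than a filtered resize.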
*/ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ScaleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { /* Selectively blur pixels within a contrast threshold. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=SelectiveBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* Break channels into separate images. WARNING: This can generate multiple images! */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=SeparateImages(*image,channel,exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { double threshold; /* Sepia-tone image. */ (void) SyncImageSettings(mogrify_info,*image); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=SepiaToneImage(*image,threshold,exception); break; } if (LocaleCompare("segment",option+1) == 0) { /* Segment image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(*image,(*image)->colorspace, mogrify_info->verbose,geometry_info.rho,geometry_info.sigma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("set",option+1) == 0) { char *value; /* Set image option. */ if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) DeleteImageOption(mogrify_info,argv[i+1]+7); (void) DeleteImageArtifact(*image,argv[i+1]+7); } else (void) DeleteImageProperty(*image,argv[i+1]); break; } value=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (value == (char *) NULL) break; if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value, exception); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) SetImageOption(image_info,argv[i+1]+7,value); (void) SetImageOption(mogrify_info,argv[i+1]+7,value); (void) SetImageArtifact(*image,argv[i+1]+7,value); } else (void) SetImageProperty(*image,argv[i+1],value); value=DestroyString(value); break; } if (LocaleCompare("shade",option+1) == 0) { /* Shade image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("shadow",option+1) == 0) { /* Shadow image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; mogrify_image=ShadowImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { /* Sharpen image. 
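            The argument is parsed as radius[xsigma]; when sigma is omitted
            it defaults to 1.0 below.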
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SharpenImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("shave",option+1) == 0) { /* Shave the image edges. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ShaveImage(*image,&geometry,exception); break; } if (LocaleCompare("shear",option+1) == 0) { /* Shear image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ShearImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { /* Sigmoidal non-linearity contrast control. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImageChannel(*image,channel, (*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho, geometry_info.sigma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("sketch",option+1) == 0) { /* Sketch image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SketchImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("solarize",option+1) == 0) { double threshold; (void) SyncImageSettings(mogrify_info,*image); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) SolarizeImageChannel(*image,channel,threshold,exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { SparseColorMethod method; char *arguments; /* Sparse Color Interpolated Gradient */ (void) SyncImageSettings(mogrify_info,*image); method=(SparseColorMethod) ParseCommandOption( MagickSparseColorOptions,MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (arguments == (char *) NULL) break; mogrify_image=SparseColorOption(*image,channel,method,arguments, option[0] == '+' ? MagickTrue : MagickFalse,exception); arguments=DestroyString(arguments); break; } if (LocaleCompare("splice",option+1) == 0) { /* Splice a solid color into the image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SpliceImage(*image,&geometry,exception); break; } if (LocaleCompare("spread",option+1) == 0) { /* Spread an image. 
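            Only the rho (radius) component of the parsed geometry is used
            as the displacement amount.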
*/ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SpreadImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("statistic",option+1) == 0) { StatisticType type; (void) SyncImageSettings(mogrify_info,*image); type=(StatisticType) ParseCommandOption(MagickStatisticOptions, MagickFalse,argv[i+1]); (void) ParseGeometry(argv[i+2],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,type,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("stretch",option+1) == 0) { if (*option == '+') { draw_info->stretch=UndefinedStretch; break; } draw_info->stretch=(StretchType) ParseCommandOption( MagickStretchOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("strip",option+1) == 0) { /* Strip image of profiles and comments. */ (void) SyncImageSettings(mogrify_info,*image); (void) StripImage(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("stroke",option+1) == 0) { ExceptionInfo *sans; if (*option == '+') { (void) QueryColorDatabase("none",&draw_info->stroke,exception); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage( draw_info->stroke_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorDatabase(argv[i+1],&draw_info->stroke,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("strokewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { draw_info->style=UndefinedStyle; break; } draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("swirl",option+1) == 0) { /* Swirl image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SwirlImage(*image,geometry_info.rho,exception); break; } break; } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') threshold=(double) QuantumRange/2; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) BilevelImageChannel(*image,channel,threshold); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { /* Thumbnail image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') { if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("tint",option+1) == 0) { /* Tint the image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TintImage(*image,argv[i+1],draw_info->fill,exception); break; } if (LocaleCompare("transform",option+1) == 0) { /* Affine transform image. 
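            The transform uses the affine matrix accumulated in draw_info,
            as set by a preceding -affine option.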
*/ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=AffineTransformImage(*image,&draw_info->affine, exception); break; } if (LocaleCompare("transparent",option+1) == 0) { MagickPixelPacket target; (void) SyncImageSettings(mogrify_info,*image); (void) QueryMagickColor(argv[i+1],&target,exception); (void) TransparentPaintImage(*image,&target,(Quantum) TransparentOpacity,*option == '-' ? MagickFalse : MagickTrue); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("transpose",option+1) == 0) { /* Transpose image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TransposeImage(*image,exception); break; } if (LocaleCompare("transverse",option+1) == 0) { /* Transverse image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TransverseImage(*image,exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("trim",option+1) == 0) { /* Trim image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TrimImage(*image,exception); break; } if (LocaleCompare("type",option+1) == 0) { ImageType type; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') type=UndefinedType; else type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, argv[i+1]); (*image)->type=UndefinedType; (void) SetImageType(*image,type); InheritException(exception,&(*image)->exception); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) QueryColorDatabase(argv[i+1],&draw_info->undercolor, exception); break; } if (LocaleCompare("unique",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:unique-colors"); break; } (void) SetImageArtifact(*image,"identify:unique-colors","true"); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { /* Unique image colors. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=UniqueImageColors(*image,exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { /* Unsharp mask image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; mogrify_image=UnsharpMaskImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi, geometry_info.psi,exception); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { (void) SetImageArtifact(*image,option+1, *option == '+' ? "false" : "true"); break; } if (LocaleCompare("vignette",option+1) == 0) { /* Vignette image. 
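            Geometry is radius[xsigma][{+-}x{+-}y]; omitted x/y values
            default to 10% of the image width and height, and a trailing '%'
            scales them relative to the image dimensions.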
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*(*image)->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*(*image)->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) (*image)->columns/100.0; geometry_info.psi*=(double) (*image)->rows/100.0; } mogrify_image=VignetteImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { (void) SetImageVirtualPixelMethod(*image, UndefinedVirtualPixelMethod); break; } (void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1])); break; } break; } case 'w': { if (LocaleCompare("wave",option+1) == 0) { /* Wave image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=WaveImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { /* Wavelet denoise image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse, argv[i+1]); if (weight == -1) weight=StringToUnsignedLong(argv[i+1]); draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-threshold",option+1) == 0) { /* White threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) WhiteThresholdImageChannel(*image,channel,argv[i+1], exception); InheritException(exception,&(*image)->exception); break; } break; } default: break; } /* Replace current image with any image that was generated. */ if (mogrify_image != (Image *) NULL) ReplaceImageInListReturnLast(image,mogrify_image); i+=count; } if (region_image != (Image *) NULL) { /* Composite transformed region onto image. */ (void) SyncImageSettings(mogrify_info,*image); (void) CompositeImage(region_image,region_image->matte != MagickFalse ? CopyCompositeOp : OverCompositeOp,*image,region_geometry.x, region_geometry.y); InheritException(exception,&region_image->exception); *image=DestroyImage(*image); *image=region_image; region_image = (Image *) NULL; } /* Free resources. */ quantize_info=DestroyQuantizeInfo(quantize_info); draw_info=DestroyDrawInfo(draw_info); mogrify_info=DestroyImageInfo(mogrify_info); status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageCommand() transforms an image or a sequence of images. These % transforms include image scaling, image rotation, color reduction, and % others. The transmogrified image overwrites the original image. 
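%
%  For example, an illustrative invocation of the mogrify utility implemented
%  by this method:
%
%    mogrify -resize 50% -quality 85 rose.jpg
%
%  halves the dimensions of rose.jpg and rewrites it in place at JPEG
%  quality 85.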
% % The format of the MogrifyImageCommand method is: % % MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc, % const char **argv,char **metadata,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o argc: the number of elements in the argument vector. % % o argv: A text array containing the command line arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType MogrifyUsage(void) { static const char miscellaneous[] = " -debug events display copious debugging information\n" " -distribute-cache port\n" " distributed pixel cache spanning one or more servers\n" " -help print program options\n" " -list type print a list of supported option arguments\n" " -log format format of debugging information\n" " -version print version information", operators[] = " -adaptive-blur geometry\n" " adaptively blur pixels; decrease effect near edges\n" " -adaptive-resize geometry\n" " adaptively resize image using 'mesh' interpolation\n" " -adaptive-sharpen geometry\n" " adaptively sharpen pixels; increase effect near edges\n" " -alpha option on, activate, off, deactivate, set, opaque, copy\n" " transparent, extract, background, or shape\n" " -annotate geometry text\n" " annotate the image with text\n" " -auto-gamma automagically adjust gamma level of image\n" " -auto-level automagically adjust color levels of image\n" " -auto-orient automagically orient (rotate) image\n" " -bench iterations measure performance\n" " -black-threshold value\n" " force all pixels below the threshold into black\n" " -blue-shift simulate a scene at nighttime in the moonlight\n" " -blur geometry reduce image noise and reduce detail levels\n" " -border geometry surround image with a border of color\n" " -bordercolor color border color\n" " -brightness-contrast geometry\n" " improve brightness / contrast of the image\n" " -canny geometry detect edges in the image\n" " -cdl filename color correct with a color decision list\n" " -charcoal radius simulate a charcoal drawing\n" " -chop geometry remove pixels from the image interior\n" " -clamp keep pixel values in range (0-QuantumRange)\n" " -clip clip along the first path from the 8BIM profile\n" " -clip-mask filename associate a clip mask with the image\n" " -clip-path id clip along a named path from the 8BIM profile\n" " -colorize value colorize the image with the fill color\n" " -color-matrix matrix apply color correction to the image\n" " -connected-components connectivity\n" " connected-components uniquely labeled\n" " -contrast enhance or reduce the image contrast\n" " -contrast-stretch geometry\n" " improve contrast by `stretching' the intensity range\n" " -convolve coefficients\n" " apply a convolution kernel to the image\n" " -cycle amount cycle the image colormap\n" " -decipher filename convert cipher pixels to plain pixels\n" " -deskew threshold straighten an image\n" " -despeckle reduce the speckles within an image\n" " -distort method args\n" " distort images according to given method ad args\n" " -draw string annotate the image with a graphic primitive\n" " -edge radius apply a filter to detect edges in the image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -emboss radius emboss an image\n" " -enhance apply a digital filter to enhance a noisy image\n" " -equalize perform histogram equalization to an image\n" " -evaluate operator value\n" " evaluate an arithmetic, relational, or logical 
expression\n" " -extent geometry set the image size\n" " -extract geometry extract area from image\n" " -hough-lines geometry\n" " identify lines in the image\n" " -features distance analyze image features (e.g. contrast, correlation)\n" " -fft implements the discrete Fourier transform (DFT)\n" " -flip flip image vertically\n" " -floodfill geometry color\n" " floodfill the image with color\n" " -flop flop image horizontally\n" " -frame geometry surround image with an ornamental border\n" " -function name parameters\n" " apply function over image values\n" " -gamma value level of gamma correction\n" " -gaussian-blur geometry\n" " reduce image noise and reduce detail levels\n" " -geometry geometry preferred size or location of the image\n" " -grayscale method convert image to grayscale\n" " -help print program options\n" " -identify identify the format and characteristics of the image\n" " -ift implements the inverse discrete Fourier transform (DFT)\n" " -implode amount implode image pixels about the center\n" " -kuwahara geometry edge preserving noise reduction filter\n" " -lat geometry local adaptive thresholding\n" " -layers method optimize, merge, or compare image layers\n" " -level value adjust the level of image contrast\n" " -level-colors color,color\n" " level image with the given colors\n" " -linear-stretch geometry\n" " improve contrast by `stretching with saturation'\n" " -liquid-rescale geometry\n" " rescale image with seam-carving\n" " -local-contrast geometry\n" " enhance local contrast\n" " -magnify double the size of the image with pixel art scaling\n" " -mean-shift geometry delineate arbitrarily shaped clusters in the image\n" " -median geometry apply a median filter to the image\n" " -mode geometry make each pixel the 'predominant color' of the\n" " neighborhood\n" " -modulate value vary the brightness, saturation, and hue\n" " -monochrome transform image to black and white\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -motion-blur geometry\n" " simulate motion blur\n" " -negate replace every pixel with its complementary color \n" " -noise geometry add or reduce noise in an image\n" " -normalize transform image to span the full range of colors\n" " -opaque color change this color to the fill color\n" " -ordered-dither NxN\n" " add a noise pattern to the image with specific\n" " amplitudes\n" " -paint radius simulate an oil painting\n" " -perceptible epsilon\n" " pixel value less than |epsilon| become epsilon or\n" " -epsilon\n" " -polaroid angle simulate a Polaroid picture\n" " -posterize levels reduce the image to a limited number of color levels\n" " -profile filename add, delete, or apply an image profile\n" " -quantize colorspace reduce colors in this colorspace\n" " -radial-blur angle radial blur the image\n" " -raise value lighten/darken image edges to create a 3-D effect\n" " -random-threshold low,high\n" " random threshold the image\n" " -region geometry apply options to a portion of the image\n" " -render render vector graphics\n" " -resample geometry change the resolution of an image\n" " -resize geometry resize the image\n" " -roll geometry roll an image vertically or horizontally\n" " -rotate degrees apply Paeth rotation to the image\n" " -sample geometry scale image with pixel sampling\n" " -scale geometry scale the image\n" " -segment values segment an image\n" " -selective-blur geometry\n" " selectively blur pixels within a contrast threshold\n" " -sepia-tone threshold\n" " simulate a sepia-toned photo\n" " -set property 
value set an image property\n" " -shade degrees shade the image using a distant light source\n" " -shadow geometry simulate an image shadow\n" " -sharpen geometry sharpen the image\n" " -shave geometry shave pixels from the image edges\n" " -shear geometry slide one edge of the image along the X or Y axis\n" " -sigmoidal-contrast geometry\n" " increase the contrast without saturating highlights or\n" " shadows\n" " -sketch geometry simulate a pencil sketch\n" " -solarize threshold negate all pixels above the threshold level\n" " -sparse-color method args\n" " fill in a image based on a few color points\n" " -splice geometry splice the background color into the image\n" " -spread radius displace image pixels by a random amount\n" " -statistic type radius\n" " replace each pixel with corresponding statistic from the neighborhood\n" " -strip strip image of all profiles and comments\n" " -swirl degrees swirl image pixels about the center\n" " -threshold value threshold the image\n" " -thumbnail geometry create a thumbnail of the image\n" " -tile filename tile image when filling a graphic primitive\n" " -tint value tint the image with the fill color\n" " -transform affine transform image\n" " -transparent color make this color transparent within the image\n" " -transpose flip image vertically and rotate 90 degrees\n" " -transverse flop image horizontally and rotate 270 degrees\n" " -trim trim image edges\n" " -type type image type\n" " -unique-colors discard all but one of any pixel color\n" " -unsharp geometry sharpen the image\n" " -vignette geometry soften the edges of the image in vignette style\n" " -wave geometry alter an image along a sine wave\n" " -wavelet-denoise threshold\n" " removes noise from the image using a wavelet transform\n" " -white-threshold value\n" " force all pixels above the threshold into white", sequence_operators[] = " -affinity filename transform image colors to match this set of colors\n" " -append append an image sequence\n" " -clut apply a color lookup table to the image\n" " -coalesce merge a sequence of images\n" " -combine combine a sequence of images\n" " -compare mathematically and visually annotate the difference between an image and its reconstruction\n" " -complex operator perform complex mathematics on an image sequence\n" " -composite composite image\n" " -copy geometry offset\n" " copy pixels from one area of an image to another\n" " -crop geometry cut out a rectangular region of the image\n" " -deconstruct break down an image sequence into constituent parts\n" " -evaluate-sequence operator\n" " evaluate an arithmetic, relational, or logical expression\n" " -flatten flatten a sequence of images\n" " -fx expression apply mathematical expression to an image channel(s)\n" " -hald-clut apply a Hald color lookup table to the image\n" " -layers method optimize, merge, or compare image layers\n" " -morph value morph an image sequence\n" " -mosaic create a mosaic from an image sequence\n" " -poly terms build a polynomial from the image sequence and the corresponding\n" " terms (coefficients and degree pairs).\n" " -print string interpret string and print to console\n" " -process arguments process the image with a custom image filter\n" " -separate separate an image channel into a grayscale image\n" " -smush geometry smush an image sequence together\n" " -write filename write images to this file", settings[] = " -adjoin join images into a single multi-image file\n" " -affine matrix affine transform matrix\n" " -alpha option activate, deactivate, reset, or set 
the alpha channel\n" " -antialias remove pixel-aliasing\n" " -authenticate password\n" " decipher image with this password\n" " -attenuate value lessen (or intensify) when adding noise to an image\n" " -background color background color\n" " -bias value add bias when convolving an image\n" " -black-point-compensation\n" " use black point compensation\n" " -blue-primary point chromaticity blue primary point\n" " -bordercolor color border color\n" " -caption string assign a caption to an image\n" " -cdl filename color correct with a color decision list\n" " -channel type apply option to select image channels\n" " -colors value preferred number of colors in the image\n" " -colorspace type alternate image colorspace\n" " -comment string annotate image with comment\n" " -compose operator set image composite operator\n" " -compress type type of pixel compression when writing the image\n" " -decipher filename convert cipher pixels to plain pixels\n" " -define format:option\n" " define one or more image format options\n" " -delay value display the next image after pausing\n" " -density geometry horizontal and vertical density of the image\n" " -depth value image depth\n" " -direction type render text right-to-left or left-to-right\n" " -display server get image or font from this X server\n" " -dispose method layer disposal method\n" " -dither method apply error diffusion to image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -encoding type text encoding type\n" " -endian type endianness (MSB or LSB) of the image\n" " -family name render text with this font family\n" " -features distance analyze image features (e.g. contrast, correlation)\n" " -fill color color to use when filling a graphic primitive\n" " -filter type use this filter when resizing an image\n" " -flatten flatten a sequence of images\n" " -font name render text with this font\n" " -format \"string\" output formatted image characteristics\n" " -function name apply a function to the image\n" " -fuzz distance colors within this distance are considered equal\n" " -gravity type horizontal and vertical text placement\n" " -green-primary point chromaticity green primary point\n" " -intensity method method to generate intensity value from pixel\n" " -intent type type of rendering intent when managing the image color\n" " -interlace type type of image interlacing scheme\n" " -interline-spacing value\n" " set the space between two text lines\n" " -interpolate method pixel color interpolation method\n" " -interword-spacing value\n" " set the space between two words\n" " -kerning value set the space between two letters\n" " -label string assign a label to an image\n" " -limit type value pixel cache resource limit\n" " -loop iterations add Netscape loop extension to your GIF animation\n" " -mask filename associate a mask with the image\n" " -matte store matte channel if the image has one\n" " -mattecolor color frame color\n" " -monitor monitor progress\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -orient type image orientation\n" " -page geometry size and location of an image canvas (setting)\n" " -path path write images to this path on disk\n" " -ping efficiently determine image attributes\n" " -pointsize value font point size\n" " -precision value maximum number of significant digits to print\n" " -preview type image preview type\n" " -quality value JPEG/MIFF/PNG compression level\n" " -quiet suppress all warning messages\n" " -red-primary point chromaticity red primary point\n" " 
-regard-warnings pay attention to warning messages\n" " -remap filename transform image colors to match this set of colors\n" " -repage geometry size and location of an image canvas\n" " -respect-parentheses settings remain in effect until parenthesis boundary\n" " -sampling-factor geometry\n" " horizontal and vertical sampling factor\n" " -scene value image scene number\n" " -seed value seed a new sequence of pseudo-random numbers\n" " -size geometry width and height of image\n" " -stretch type render text with this font stretch\n" " -stroke color graphic primitive stroke color\n" " -strokewidth value graphic primitive stroke width\n" " -style type render text with this font style\n" " -synchronize synchronize image to storage device\n" " -taint declare the image as modified\n" " -texture filename name of texture to tile onto the image background\n" " -tile-offset geometry\n" " tile offset\n" " -treedepth value color tree depth\n" " -transparent-color color\n" " transparent color\n" " -undercolor color annotation bounding box color\n" " -units type the units of image resolution\n" " -verbose print detailed information about the image\n" " -view FlashPix viewing transforms\n" " -virtual-pixel method\n" " virtual pixel access method\n" " -weight type render text with this font weight\n" " -white-point point chromaticity white point", stack_operators[] = " -delete indexes delete the image from the image sequence\n" " -duplicate count,indexes\n" " duplicate an image one or more times\n" " -insert index insert last image into the image sequence\n" " -reverse reverse image sequence\n" " -swap indexes swap two images in the image sequence"; ListMagickVersion(stdout); (void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n", GetClientName()); (void) printf("\nImage Settings:\n"); (void) puts(settings); (void) printf("\nImage Operators:\n"); (void) puts(operators); (void) printf("\nImage Sequence Operators:\n"); (void) puts(sequence_operators); (void) printf("\nImage Stack Operators:\n"); (void) puts(stack_operators); (void) printf("\nMiscellaneous Options:\n"); (void) puts(miscellaneous); (void) printf( "\nBy default, the image format of `file' is determined by its magic\n"); (void) printf( "number. To specify a particular image format, precede the filename\n"); (void) printf( "with an image format name and a colon (i.e. ps:image) or specify the\n"); (void) printf( "image type as the filename suffix (i.e. image.ps). 
Specify 'file' as\n"); (void) printf("'-' for standard input or output.\n"); return(MagickFalse); } WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info, int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception) { #define DestroyMogrify() \ { \ if (format != (char *) NULL) \ format=DestroyString(format); \ if (path != (char *) NULL) \ path=DestroyString(path); \ DestroyImageStack(); \ for (i=0; i < (ssize_t) argc; i++) \ argv[i]=DestroyString(argv[i]); \ argv=(char **) RelinquishMagickMemory(argv); \ } #define ThrowMogrifyException(asperity,tag,option) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \ option); \ DestroyMogrify(); \ return(MagickFalse); \ } #define ThrowMogrifyInvalidArgumentException(option,argument) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),OptionError, \ "InvalidArgument","`%s': %s",argument,option); \ DestroyMogrify(); \ return(MagickFalse); \ } char *format, *option, *path; Image *image; ImageStack image_stack[MaxImageStackDepth+1]; MagickBooleanType global_colormap; MagickBooleanType fire, pend, respect_parenthesis; MagickStatusType status; register ssize_t i; ssize_t j, k; wand_unreferenced(metadata); /* Set defaults. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(exception != (ExceptionInfo *) NULL); if (argc == 2) { option=argv[1]; if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); return(MagickTrue); } } if (argc < 2) return(MogrifyUsage()); format=(char *) NULL; path=(char *) NULL; global_colormap=MagickFalse; k=0; j=1; NewImageStack(); option=(char *) NULL; pend=MagickFalse; respect_parenthesis=MagickFalse; status=MagickTrue; /* Parse command line. */ ReadCommandlLine(argc,&argv); status=ExpandFilenames(&argc,&argv); if (status == MagickFalse) ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed", GetExceptionMessage(errno)); for (i=1; i < (ssize_t) argc; i++) { option=argv[i]; if (LocaleCompare(option,"(") == 0) { FireImageStack(MagickFalse,MagickTrue,pend); if (k == MaxImageStackDepth) ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply", option); PushImageStack(); continue; } if (LocaleCompare(option,")") == 0) { FireImageStack(MagickFalse,MagickTrue,MagickTrue); if (k == 0) ThrowMogrifyException(OptionError,"UnableToParseExpression",option); PopImageStack(); continue; } if (IsCommandOption(option) == MagickFalse) { char backup_filename[MaxTextExtent], *filename; Image *images; struct stat properties; /* Option is a file name: begin by reading image from specified file. 
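      The queued settings and operators are then applied and the result is
      written back over the original file: if the original is writable it is
      first renamed as a backup by appending "~" (up to six tildes are tried
      until an unused name is found), the transmogrified image is written,
      and on a successful write the original timestamp is restored (when the
      "preserve-timestamp" option is set and utime() is available) and the
      backup file is removed.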
*/ FireImageStack(MagickFalse,MagickFalse,pend); filename=argv[i]; if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1))) filename=argv[++i]; (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); images=ReadImages(image_info,exception); status&=(images != (Image *) NULL) && (exception->severity < ErrorException); if (images == (Image *) NULL) continue; properties=(*GetBlobProperties(images)); if (format != (char *) NULL) (void) CopyMagickString(images->filename,images->magick_filename, MaxTextExtent); if (path != (char *) NULL) { GetPathComponent(option,TailPath,filename); (void) FormatLocaleString(images->filename,MaxTextExtent,"%s%c%s", path,*DirectorySeparator,filename); } if (format != (char *) NULL) AppendImageFormat(format,images->filename); AppendImageStack(images); FinalizeImageSettings(image_info,image,MagickFalse); if (global_colormap != MagickFalse) { QuantizeInfo *quantize_info; quantize_info=AcquireQuantizeInfo(image_info); (void) RemapImages(quantize_info,images,(Image *) NULL); quantize_info=DestroyQuantizeInfo(quantize_info); } *backup_filename='\0'; if ((LocaleCompare(image->filename,"-") != 0) && (IsPathWritable(image->filename) != MagickFalse)) { register ssize_t i; /* Rename image file as backup. */ (void) CopyMagickString(backup_filename,image->filename, MaxTextExtent); for (i=0; i < 6; i++) { (void) ConcatenateMagickString(backup_filename,"~",MaxTextExtent); if (IsPathAccessible(backup_filename) == MagickFalse) break; } if ((IsPathAccessible(backup_filename) != MagickFalse) || (rename_utf8(image->filename,backup_filename) != 0)) *backup_filename='\0'; } /* Write transmogrified image to disk. */ image_info->synchronize=MagickTrue; status&=WriteImages(image_info,image,image->filename,exception); if (status != MagickFalse) { #if defined(MAGICKCORE_HAVE_UTIME) { MagickBooleanType preserve_timestamp; preserve_timestamp=IsStringTrue(GetImageOption(image_info, "preserve-timestamp")); if (preserve_timestamp != MagickFalse) { struct utimbuf timestamp; timestamp.actime=properties.st_atime; timestamp.modtime=properties.st_mtime; (void) utime(image->filename,&timestamp); } } #endif if (*backup_filename != '\0') (void) remove_utf8(backup_filename); } RemoveAllImageStack(); continue; } pend=image != (Image *) NULL ? 
MagickTrue : MagickFalse; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("affine",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("alpha",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickAlphaOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedAlphaChannelType", argv[i]); break; } if (LocaleCompare("annotate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); i++; break; } if (LocaleCompare("antialias",option+1) == 0) break; if (LocaleCompare("append",option+1) == 0) break; if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("auto-gamma",option+1) == 0) break; if (LocaleCompare("auto-level",option+1) == 0) break; if (LocaleCompare("auto-orient",option+1) == 0) break; if (LocaleCompare("average",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) break; if (LocaleCompare("black-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-shift",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("border",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'c': { if (LocaleCompare("cache",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("canny",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("channel",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParseChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("cdl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("charcoal",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("chop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clamp",option+1) == 0) break; if (LocaleCompare("clip",option+1) == 0) break; if (LocaleCompare("clip-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("clut",option+1) == 0) break; if (LocaleCompare("coalesce",option+1) == 0) break; if (LocaleCompare("colorize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("colors",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("colorspace",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("combine",option+1) == 0) { if (*option == '-') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("compare",option+1) == 0) break; if (LocaleCompare("complex",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickComplexOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedComplexOperator", argv[i]); break; } if (LocaleCompare("composite",option+1) == 0) break; if (LocaleCompare("compress",option+1) == 0) { ssize_t compress; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); compress=ParseCommandOption(MagickCompressOptions,MagickFalse, argv[i]); if (compress < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageCompression", argv[i]); break; } if (LocaleCompare("concurrent",option+1) == 0) break; if (LocaleCompare("connected-components",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("contrast",option+1) == 0) break; if (LocaleCompare("contrast-stretch",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("convolve",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); 
kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("copy",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("crop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("cycle",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("deconstruct",option+1) == 0) break; if (LocaleCompare("debug",option+1) == 0) { ssize_t event; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]); if (event < 0) ThrowMogrifyException(OptionError,"UnrecognizedEventType", argv[i]); (void) SetLogEventMask(argv[i]); break; } if (LocaleCompare("define",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { const char *define; define=GetImageOption(image_info,argv[i]); if (define == (const char *) NULL) ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]); break; } break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("density",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("deskew",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("despeckle",option+1) == 0) break; if (LocaleCompare("dft",option+1) == 0) break; if (LocaleCompare("direction",option+1) == 0) { ssize_t direction; if (*option == 
'+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, argv[i]); if (direction < 0) ThrowMogrifyException(OptionError,"UnrecognizedDirectionType", argv[i]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dispose",option+1) == 0) { ssize_t dispose; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, argv[i]); if (dispose < 0) ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod", argv[i]); break; } if (LocaleCompare("distort",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dither",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod", argv[i]); break; } if (LocaleCompare("draw",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("duration",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'e': { if (LocaleCompare("edge",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("emboss",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("encipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("endian",option+1) == 0) { ssize_t endian; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]); if (endian < 0) ThrowMogrifyException(OptionError,"UnrecognizedEndianType", argv[i]); break; } if 
(LocaleCompare("enhance",option+1) == 0) break; if (LocaleCompare("equalize",option+1) == 0) break; if (LocaleCompare("evaluate",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("evaluate-sequence",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); break; } if (LocaleCompare("extent",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("extract",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("filter",option+1) == 0) { ssize_t filter; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]); if (filter < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageFilter", argv[i]); break; } if (LocaleCompare("flatten",option+1) == 0) break; if (LocaleCompare("flip",option+1) == 0) break; if (LocaleCompare("flop",option+1) == 0) break; if (LocaleCompare("floodfill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("format",option+1) == 0) { (void) CopyMagickString(argv[i]+1,"sans",MaxTextExtent); (void) CloneString(&format,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&format,argv[i]); (void) 
CopyMagickString(image_info->filename,format,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,":", MaxTextExtent); (void) SetImageInfo(image_info,0,exception); if (*image_info->magick == '\0') ThrowMogrifyException(OptionError,"UnrecognizedImageFormat", format); break; } if (LocaleCompare("frame",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("function",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fx",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("geometry",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("gravity",option+1) == 0) { ssize_t gravity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse, argv[i]); if (gravity < 0) ThrowMogrifyException(OptionError,"UnrecognizedGravityType", argv[i]); break; } if (LocaleCompare("grayscale",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod", argv[i]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) break; if (LocaleCompare("hough-lines",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("help",option+1) == 0) || (LocaleCompare("-help",option+1) == 0)) return(MogrifyUsage()); ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'i': { if (LocaleCompare("identify",option+1) == 0) break; if (LocaleCompare("idft",option+1) == 0) break; if (LocaleCompare("implode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("intensity",option+1) == 0) { ssize_t intensity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,argv[i]); if (intensity < 0) ThrowMogrifyException(OptionError, "UnrecognizedPixelIntensityMethod",argv[i]); break; } if (LocaleCompare("intent",option+1) == 0) { ssize_t intent; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]); if (intent < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntentType", argv[i]); break; } if (LocaleCompare("interlace",option+1) == 0) { ssize_t interlace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse, argv[i]); if (interlace < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType", argv[i]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("interpolate",option+1) == 0) { ssize_t interpolate; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse, argv[i]); if (interpolate < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod", argv[i]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("kuwahara",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'l': { if (LocaleCompare("label",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } 
if (LocaleCompare("lat",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("layers",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod", argv[i]); break; } if (LocaleCompare("level",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("level-colors",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("linewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("limit",option+1) == 0) { char *p; double value; ssize_t resource; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); resource=ParseCommandOption(MagickResourceOptions,MagickFalse, argv[i]); if (resource < 0) ThrowMogrifyException(OptionError,"UnrecognizedResourceType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); value=StringToDouble(argv[i],&p); (void) value; if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0)) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]); if (list < 0) ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]); status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **) argv+j,exception); return(status == 0 ? MagickFalse : MagickTrue); } if (LocaleCompare("local-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; i++; if ((i == (ssize_t) argc) || (strchr(argv[i],'%') == (char *) NULL)) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'm': { if (LocaleCompare("magnify",option+1) == 0) break; if (LocaleCompare("map",option+1) == 0) { global_colormap=(*option == '+') ? 
MagickTrue : MagickFalse; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("matte",option+1) == 0) break; if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("metric",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedMetricType", argv[i]); break; } if (LocaleCompare("maximum",option+1) == 0) break; if (LocaleCompare("mean-shift",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("median",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("minimum",option+1) == 0) break; if (LocaleCompare("modulate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("mode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("monitor",option+1) == 0) break; if (LocaleCompare("monochrome",option+1) == 0) break; if (LocaleCompare("morph",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MaxTextExtent]; KernelInfo *kernel_info; ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); GetNextToken(argv[i],(const char **) NULL,MaxTextExtent,token); op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod", token); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("mosaic",option+1) == 0) break; if (LocaleCompare("motion-blur",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'n': { if 
(LocaleCompare("negate",option+1) == 0) break; if (LocaleCompare("noise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { ssize_t noise; noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,argv[i]); if (noise < 0) ThrowMogrifyException(OptionError,"UnrecognizedNoiseType", argv[i]); break; } if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("noop",option+1) == 0) break; if (LocaleCompare("normalize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("orient",option+1) == 0) { ssize_t orientation; orientation=UndefinedOrientation; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse, argv[i]); if (orientation < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'p': { if (LocaleCompare("page",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("paint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("path",option+1) == 0) { (void) CloneString(&path,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&path,argv[i]); break; } if (LocaleCompare("perceptible",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("polaroid",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("poly",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("posterize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("precision",option+1) == 0) { if (*option == '+') 
break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("print",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; }
if (LocaleCompare("process",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; }
if (LocaleCompare("profile",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; }
ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("quantize",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; }
if (LocaleCompare("quiet",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'r': { if (LocaleCompare("radial-blur",option+1) == 0 || LocaleCompare("rotational-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("raise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("random-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("recolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("regard-warnings",option+1) == 0) break; if (LocaleCompare("region",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; }
if (LocaleCompare("remap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; }
if (LocaleCompare("render",option+1) == 0) break; if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleNCompare("respect-parentheses",option+1,17) == 0) { respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("reverse",option+1) == 0) break; if (LocaleCompare("roll",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("rotate",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 's': { if (LocaleCompare("sample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sampling-factor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scale",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scene",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("seed",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("segment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("selective-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("separate",option+1) == 0) break; if (LocaleCompare("sepia-tone",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("set",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("shade",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shadow",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shave",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shear",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sketch",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("smush",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; break; } if (LocaleCompare("solarize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sparse-color",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("splice",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("spread",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("statistic",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedStatisticType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("stretch",option+1) == 0) { ssize_t stretch; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,argv[i]); if (stretch < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("strip",option+1) == 0) break; if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("style",option+1) == 0) { ssize_t style; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]); if (style < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("swap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("swirl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("synchronize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 't': { if (LocaleCompare("taint",option+1) == 0) break; if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("tint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transform",option+1) == 0) break; if (LocaleCompare("transpose",option+1) == 0) break; if (LocaleCompare("transverse",option+1) == 0) break; if (LocaleCompare("threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transparent",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("treedepth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("trim",option+1) == 0) break; if (LocaleCompare("type",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageType", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("unique-colors",option+1) == 0) break; if (LocaleCompare("units",option+1) == 0) { ssize_t units; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); units=ParseCommandOption(MagickResolutionOptions,MagickFalse, argv[i]); if (units < 0) ThrowMogrifyException(OptionError,"UnrecognizedUnitsType", argv[i]); break; } if (LocaleCompare("unsharp",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { image_info->verbose=(*option == '-') ? 
MagickTrue : MagickFalse; break; } if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); break; } if (LocaleCompare("view",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("vignette",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError, "UnrecognizedVirtualPixelMethod",argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'w': { if (LocaleCompare("wave",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("write",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case '?': break; default: ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) & FireOptionFlag) == 0 ? MagickFalse : MagickTrue; if (fire != MagickFalse) FireImageStack(MagickFalse,MagickTrue,MagickTrue); } if (k != 0) ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]); if (i != (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]); DestroyMogrify(); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageInfo() applies image processing settings to the image as % prescribed by command line options. 
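%
%  As an illustration only (the settings shown here are arbitrary, not part
%  of any particular workflow), a caller might seed an ImageInfo with
%  settings before reading an image:
%
%    const char *settings[] = { "-quality", "85", "-colorspace", "sRGB" };
%    (void) MogrifyImageInfo(image_info,4,settings,exception);
%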
% % The format of the MogrifyImageInfo method is: % % MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc, % const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info, const int argc,const char **argv,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; ssize_t count; register ssize_t i; /* Initialize method variables. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (argc < 0) return(MagickTrue); /* Set the image settings. */ for (i=0; i < (ssize_t) argc; i++) { option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("antialias",option+1) == 0) { image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') (void) CloneString(&image_info->authenticate,(char *) NULL); else (void) CloneString(&image_info->authenticate,argv[i+1]); break; } break; } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorDatabase(MogrifyBackgroundColor, &image_info->background_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(argv[i+1],&image_info->background_color, exception); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorDatabase(MogrifyBorderColor, &image_info->border_color,exception); break; } (void) QueryColorDatabase(argv[i+1],&image_info->border_color, exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"undercolor","none"); break; } (void) SetImageOption(image_info,"undercolor",argv[i+1]); break; } break; } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType 
limit; limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+1]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') { image_info->channel=DefaultChannels; break; } image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("colors",option+1) == 0) { image_info->colors=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("colorspace",option+1) == 0) { if (*option == '+') { image_info->colorspace=UndefinedColorspace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compress",option+1) == 0) { if (*option == '+') { image_info->compression=UndefinedCompression; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'd': { if (LocaleCompare("debug",option+1) == 0) { if (*option == '+') (void) SetLogEventMask("none"); else (void) SetLogEventMask(argv[i+1]); image_info->debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else (void) DeleteImageOption(image_info,argv[i+1]); break; } if (LocaleNCompare(argv[i+1],"registry:",9) == 0) { (void) DefineImageRegistry(StringRegistryType,argv[i+1]+9, exception); break; } (void) DefineImageOption(image_info,argv[i+1]); break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. 
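The density is used, for example, when rasterizing vector formats such as PostScript; the '+' form below restores the default of 72 dots per inch.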
*/ if (*option == '+') { if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); (void) SetImageOption(image_info,option+1,"72"); break; } (void) CloneString(&image_info->density,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') { image_info->depth=MAGICKCORE_QUANTUM_DEPTH; break; } image_info->depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("direction",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') { if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); break; } (void) CloneString(&image_info->server_name,argv[i+1]); break; } if (LocaleCompare("dispose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { image_info->dither=MagickFalse; (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); image_info->dither=MagickTrue; break; } break; } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("endian",option+1) == 0) { if (*option == '+') { image_info->endian=UndefinedEndian; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->endian=(EndianType) ParseCommandOption( MagickEndianOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("extract",option+1) == 0) { /* Set image extract geometry. 
*/ if (*option == '+') { if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); break; } (void) CloneString(&image_info->extract,argv[i+1]); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option != '+') (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("filter",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); break; } (void) CloneString(&image_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { register const char *q; for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) image_info->ping=MagickFalse; (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') { image_info->fuzz=0.0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->fuzz=StringToDoubleInterval(argv[i+1],(double) QuantumRange+1.0); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("intent",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interlace",option+1) == 0) { if (*option == '+') { image_info->interlace=UndefinedInterlace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->interlace=(InterlaceType) ParseCommandOption( MagickInterlaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interpolate",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'l': { if 
(LocaleCompare("label",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; ResourceType type; if (*option == '+') break; type=(ResourceType) ParseCommandOption(MagickResourceOptions, MagickFalse,argv[i+1]); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+2]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0); (void) SetMagickResourceLimit(type,limit); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; /* Display configuration list. */ list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]); switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,exception); break; } case MagickFormatOptions: { (void) ListMagickInfo((FILE *) NULL,exception); break; } case MagickLocaleOptions: { (void) ListLocaleInfo((FILE *) NULL,exception); break; } case MagickLogOptions: { (void) ListLogInfo((FILE *) NULL,exception); break; } case MagickMagicOptions: { (void) ListMagicInfo((FILE *) NULL,exception); break; } case MagickMimeOptions: { (void) ListMimeInfo((FILE *) NULL,exception); break; } case MagickModuleOptions: { (void) ListModuleInfo((FILE *) NULL,exception); break; } case MagickPolicyOptions: { (void) ListPolicyInfo((FILE *) NULL,exception); break; } case MagickResourceOptions: { (void) ListMagickResourceInfo((FILE *) NULL,exception); break; } case MagickThresholdOptions: { (void) ListThresholdMaps((FILE *) NULL,exception); break; } default: { (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, exception); break; } } break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; (void) SetLogFormat(argv[i+1]); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("matte",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(MogrifyMatteColor, &image_info->matte_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(argv[i+1],&image_info->matte_color, exception); break; } if (LocaleCompare("metric",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(image_info,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { image_info->monochrome=(*option == '-') ? 
MagickTrue : MagickFalse; break; } break; } case 'o': { if (LocaleCompare("orient",option+1) == 0) { if (*option == '+') { image_info->orientation=UndefinedOrientation; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } } case 'p': { if (LocaleCompare("page",option+1) == 0) { char *canonical_page, page[MaxTextExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) CloneString(&image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(image_info,"page"); if (image_option != (const char *) NULL) (void) ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(argv[i+1]); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MaxTextExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(image_info,option+1,page); (void) CloneString(&image_info->page,page); break; } if (LocaleCompare("pen",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("ping",option+1) == 0) { image_info->ping=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') geometry_info.rho=0.0; else (void) ParseGeometry(argv[i+1],&geometry_info); image_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("precision",option+1) == 0) { (void) SetMagickPrecision(StringToInteger(argv[i+1])); break; } if (LocaleCompare("preview",option+1) == 0) { /* Preview image. */ if (*option == '+') { image_info->preview_type=UndefinedPreview; break; } image_info->preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); break; } break; } case 'q': { if (LocaleCompare("quality",option+1) == 0) { /* Set image compression quality. */ if (*option == '+') { image_info->quality=UndefinedCompressionQuality; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->quality=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("quiet",option+1) == 0) { static WarningHandler warning_handler = (WarningHandler) NULL; if (*option == '+') { /* Restore error or warning messages. */ warning_handler=SetWarningHandler(warning_handler); break; } /* Suppress error or warning messages. */ warning_handler=SetWarningHandler((WarningHandler) NULL); break; } break; } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* Set image sampling factor. 
*/ if (*option == '+') { if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); break; } (void) CloneString(&image_info->sampling_factor,argv[i+1]); break; } if (LocaleCompare("scene",option+1) == 0) { /* Set image scene. */ if (*option == '+') { image_info->scene=0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->scene=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("seed",option+1) == 0) { unsigned long seed; if (*option == '+') { seed=(unsigned long) time((time_t *) NULL); SetRandomSecretKey(seed); break; } seed=StringToUnsignedLong(argv[i+1]); SetRandomSecretKey(seed); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') { if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); break; } (void) CloneString(&image_info->size,argv[i+1]); break; } if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"none"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"none"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("synchronize",option+1) == 0) { if (*option == '+') { image_info->synchronize=MagickFalse; break; } image_info->synchronize=MagickTrue; break; } break; } case 't': { if (LocaleCompare("taint",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') { if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); break; } (void) CloneString(&image_info->texture,argv[i+1]); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase("none",&image_info->transparent_color, exception); (void) SetImageOption(image_info,option+1,"none"); break; } (void) QueryColorDatabase(argv[i+1],&image_info->transparent_color, exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("type",option+1) == 0) { if (*option == '+') { image_info->type=UndefinedType; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions, MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("units",option+1) == 0) { if (*option == '+') { image_info->units=UndefinedResolution; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->units=(ResolutionType) ParseCommandOption( MagickResolutionOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 
'v': { if (LocaleCompare("verbose",option+1) == 0) { if (*option == '+') { image_info->verbose=MagickFalse; break; } image_info->verbose=MagickTrue; image_info->ping=MagickFalse; break; } if (LocaleCompare("view",option+1) == 0) { if (*option == '+') { if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); break; } (void) CloneString(&image_info->view,argv[i+1]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { image_info->virtual_pixel_method=UndefinedVirtualPixelMethod; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->virtual_pixel_method=(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'w': { if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } default: break; } i+=count; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageList() applies any command line options that might affect the % entire image list (e.g. -append, -coalesce, etc.). % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc, % const char **argv,Image **images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info, const int argc,const char **argv,Image **images,ExceptionInfo *exception) { ChannelType channel; const char *option; ImageInfo *mogrify_info; MagickStatusType status; QuantizeInfo *quantize_info; register ssize_t i; ssize_t count, index; /* Apply options to the image list. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image **) NULL); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); quantize_info=AcquireQuantizeInfo(mogrify_info); channel=mogrify_info->channel; status=MagickTrue; for (i=0; i < (ssize_t) argc; i++) { if (*images == (Image *) NULL) break; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); switch (*(option+1)) { case 'a': { if (LocaleCompare("affinity",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL); InheritException(exception,&(*images)->exception); break; } i++; break; } if (LocaleCompare("append",option+1) == 0) { Image *append_image; (void) SyncImagesSettings(mogrify_info,*images); append_image=AppendImages(*images,*option == '-' ? MagickTrue : MagickFalse,exception); if (append_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=append_image; break; } if (LocaleCompare("average",option+1) == 0) { Image *average_image; /* Average an image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images); average_image=EvaluateImages(*images,MeanEvaluateOperator, exception); if (average_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=average_image; break; } break; } case 'c': { if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') { channel=DefaultChannels; break; } channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image, *image; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); clut_image=RemoveFirstImageFromList(images); if (clut_image == (Image *) NULL) { status=MagickFalse; break; } (void) ClutImageChannel(image,channel,clut_image); clut_image=DestroyImage(clut_image); InheritException(exception,&image->exception); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("coalesce",option+1) == 0) { Image *coalesce_image; (void) SyncImagesSettings(mogrify_info,*images); coalesce_image=CoalesceImages(*images,exception); if (coalesce_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=coalesce_image; break; } if (LocaleCompare("combine",option+1) == 0) { Image *combine_image; (void) SyncImagesSettings(mogrify_info,*images); combine_image=CombineImages(*images,channel,exception); if (combine_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=combine_image; break; } if (LocaleCompare("compare",option+1) == 0) { const char *option; double distortion; Image *difference_image, *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. 
*/ (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); reconstruct_image=RemoveFirstImageFromList(images); if (reconstruct_image == (Image *) NULL) { status=MagickFalse; break; } metric=UndefinedMetric; option=GetImageOption(image_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); difference_image=CompareImageChannels(image,reconstruct_image, channel,metric,&distortion,exception); if (difference_image == (Image *) NULL) break; reconstruct_image=DestroyImage(reconstruct_image); image=DestroyImage(image); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=difference_image; break; } if (LocaleCompare("complex",option+1) == 0) { ComplexOperator op; Image *complex_images; (void) SyncImageSettings(mogrify_info,*images); op=(ComplexOperator) ParseCommandOption(MagickComplexOptions, MagickFalse,argv[i+1]); complex_images=ComplexImages(*images,op,exception); if (complex_images == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=complex_images; break; } if (LocaleCompare("composite",option+1) == 0) { Image *mask_image, *composite_image, *image; RectangleInfo geometry; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); composite_image=RemoveFirstImageFromList(images); if (composite_image == (Image *) NULL) { status=MagickFalse; break; } (void) TransformImage(&composite_image,(char *) NULL, composite_image->geometry); SetGeometry(composite_image,&geometry); (void) ParseAbsoluteGeometry(composite_image->geometry,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity, &geometry); mask_image=RemoveFirstImageFromList(images); if (mask_image != (Image *) NULL) { if ((image->compose == DisplaceCompositeOp) || (image->compose == DistortCompositeOp)) { /* Merge Y displacement into X displacement image. */ (void) CompositeImage(composite_image,CopyGreenCompositeOp, mask_image,0,0); mask_image=DestroyImage(mask_image); } else { /* Set a blending mask for the composition. */ if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=mask_image; (void) NegateImage(image->mask,MagickFalse); } } (void) CompositeImageChannel(image,channel,image->compose, composite_image,geometry.x,geometry.y); if (mask_image != (Image *) NULL) { image->mask=DestroyImage(image->mask); mask_image=image->mask; } composite_image=DestroyImage(composite_image); InheritException(exception,&image->exception); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
*/ (void) SyncImageSettings(mogrify_info,*images); (void) ParsePageGeometry(*images,argv[i+2],&geometry,exception); offset.x=geometry.x; offset.y=geometry.y; source_image=(*images); if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,argv[i+1],&geometry, exception); status=CopyImagePixels(*images,source_image,&geometry,&offset, exception); break; } break; } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { Image *deconstruct_image; (void) SyncImagesSettings(mogrify_info,*images); deconstruct_image=DeconstructImages(*images,exception); if (deconstruct_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=deconstruct_image; break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') DeleteImages(images,"-1",exception); else DeleteImages(images,argv[i+1],exception); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither=MagickFalse; break; } quantize_info->dither=MagickTrue; quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("duplicate",option+1) == 0) { Image *duplicate_images; if (*option == '+') duplicate_images=DuplicateImages(*images,1,"-1",exception); else { const char *p; size_t number_duplicates; number_duplicates=(size_t) StringToLong(argv[i+1]); p=strchr(argv[i+1],','); if (p == (const char *) NULL) duplicate_images=DuplicateImages(*images,number_duplicates, "-1",exception); else duplicate_images=DuplicateImages(*images,number_duplicates,p, exception); } AppendImageToList(images, duplicate_images); (void) SyncImagesSettings(mogrify_info,*images); break; } break; } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { Image *evaluate_image; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*images); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); evaluate_image=EvaluateImages(*images,op,exception); if (evaluate_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=evaluate_image; break; } break; } case 'f': { if (LocaleCompare("fft",option+1) == 0) { Image *fourier_image; /* Implements the discrete Fourier transform (DFT). */ (void) SyncImageSettings(mogrify_info,*images); fourier_image=ForwardFourierTransformImage(*images,*option == '-' ? 
MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("flatten",option+1) == 0) { Image *flatten_image; (void) SyncImagesSettings(mogrify_info,*images); flatten_image=MergeImageLayers(*images,FlattenLayer,exception); if (flatten_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=flatten_image; break; } if (LocaleCompare("fx",option+1) == 0) { Image *fx_image; (void) SyncImagesSettings(mogrify_info,*images); fx_image=FxImageChannel(*images,channel,argv[i+1],exception); if (fx_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=fx_image; break; } break; } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { Image *hald_image, *image; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); hald_image=RemoveFirstImageFromList(images); if (hald_image == (Image *) NULL) { status=MagickFalse; break; } (void) HaldClutImageChannel(image,channel,hald_image); hald_image=DestroyImage(hald_image); InheritException(exception,&image->exception); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=image; break; } break; } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *fourier_image, *magnitude_image, *phase_image; /* Implements the inverse fourier discrete Fourier transform (DFT). */ (void) SyncImagesSettings(mogrify_info,*images); magnitude_image=RemoveFirstImageFromList(images); phase_image=RemoveFirstImageFromList(images); if (phase_image == (Image *) NULL) { status=MagickFalse; break; } fourier_image=InverseFourierTransformImage(magnitude_image, phase_image,*option == '-' ? MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("insert",option+1) == 0) { Image *p, *q; index=0; if (*option != '+') index=(ssize_t) StringToLong(argv[i+1]); p=RemoveLastImageFromList(images); if (p == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } q=p; if (index == 0) PrependImageToList(images,q); else if (index == (ssize_t) GetImageListLength(*images)) AppendImageToList(images,q); else { q=GetImageFromList(*images,index-1); if (q == (Image *) NULL) { p=DestroyImage(p); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } InsertImageInList(&q,p); } *images=GetFirstImageInList(q); break; } break; } case 'l': { if (LocaleCompare("layers",option+1) == 0) { Image *layers; ImageLayerMethod method; (void) SyncImagesSettings(mogrify_info,*images); layers=(Image *) NULL; method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions, MagickFalse,argv[i+1]); switch (method) { case CoalesceLayer: { layers=CoalesceImages(*images,exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { layers=CompareImageLayers(*images,method,exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { layers=MergeImageLayers(*images,method,exception); break; } case DisposeLayer: { layers=DisposeImages(*images,exception); break; } case OptimizeImageLayer: { layers=OptimizeImageLayers(*images,exception); break; } case OptimizePlusLayer: { layers=OptimizePlusImageLayers(*images,exception); break; } 
case OptimizeTransLayer: { OptimizeImageTransparency(*images,exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(images,exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(images,exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. */ layers=CoalesceImages(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; layers=OptimizeImageLayers(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; layers=(Image *) NULL; OptimizeImageTransparency(*images,exception); InheritException(exception,&(*images)->exception); (void) RemapImages(quantize_info,*images,(Image *) NULL); break; } case CompositeLayer: { CompositeOperator compose; Image *source; RectangleInfo geometry; /* Split image sequence at the first 'NULL:' image. */ source=(*images); while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. */ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); status=MagickFalse; break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(*images,&geometry); (void) ParseAbsoluteGeometry((*images)->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry((*images)->page.width != 0 ? (*images)->page.width : (*images)->columns, (*images)->page.height != 0 ? (*images)->page.height : (*images)->rows,(*images)->gravity,&geometry); compose=OverCompositeOp; option=GetImageOption(mogrify_info,"compose"); if (option != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,option); CompositeLayers(*images,compose,source,geometry.x,geometry.y, exception); source=DestroyImageList(source); break; } } if (layers == (Image *) NULL) break; InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; break; } break; } case 'm': { if (LocaleCompare("map",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL); InheritException(exception,&(*images)->exception); break; } i++; break; } if (LocaleCompare("maximum",option+1) == 0) { Image *maximum_image; /* Maximum image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images); maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception); if (maximum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=maximum_image; break; } if (LocaleCompare("minimum",option+1) == 0) { Image *minimum_image; /* Minimum image sequence (deprecated). 
*/ (void) SyncImagesSettings(mogrify_info,*images); minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception); if (minimum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=minimum_image; break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; (void) SyncImagesSettings(mogrify_info,*images); morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]), exception); if (morph_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { Image *mosaic_image; (void) SyncImagesSettings(mogrify_info,*images); mosaic_image=MergeImageLayers(*images,MosaicLayer,exception); if (mosaic_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=mosaic_image; break; } break; } case 'p': { if (LocaleCompare("poly",option+1) == 0) { char *args, token[MaxTextExtent]; const char *p; double *arguments; Image *polynomial_image; register ssize_t x; size_t number_arguments; /* Polynomial image. */ (void) SyncImageSettings(mogrify_info,*images); args=InterpretImageProperties(mogrify_info,*images,argv[i+1]); InheritException(exception,&(*images)->exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*images)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); polynomial_image=PolynomialImageChannel(*images,channel, number_arguments >> 1,arguments,exception); arguments=(double *) RelinquishMagickMemory(arguments); if (polynomial_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=polynomial_image; break; } if (LocaleCompare("print",option+1) == 0) { char *string; (void) SyncImagesSettings(mogrify_info,*images); string=InterpretImageProperties(mogrify_info,*images,argv[i+1]); if (string == (char *) NULL) break; InheritException(exception,&(*images)->exception); (void) FormatLocaleFile(stdout,"%s",string); string=DestroyString(string); } if (LocaleCompare("process",option+1) == 0) { char **arguments; int j, number_arguments; (void) SyncImagesSettings(mogrify_info,*images); arguments=StringToArgv(argv[i+1],&number_arguments); if (arguments == (char **) NULL) break; if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL)) { char breaker, quote, *token; const char *arguments; int next, status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg". 
*/ length=strlen(argv[i+1]); token=(char *) NULL; if (~length >= (MaxTextExtent-1)) token=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; arguments=argv[i+1]; token_info=AcquireTokenInfo(); status=Tokenizer(token_info,0,token,length,arguments,"","=", "\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (status == 0) { const char *argv; argv=(&(arguments[next])); (void) InvokeDynamicImageFilter(token,&(*images),1,&argv, exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&(*images), number_arguments-2,(const char **) arguments+2,exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } break; } case 'r': { if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(images); InheritException(exception,&(*images)->exception); break; } break; } case 's': { if (LocaleCompare("smush",option+1) == 0) { Image *smush_image; ssize_t offset; (void) SyncImagesSettings(mogrify_info,*images); offset=(ssize_t) StringToLong(argv[i+1]); smush_image=SmushImages(*images,*option == '-' ? MagickTrue : MagickFalse,offset,exception); if (smush_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=smush_image; break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *u, *v; ssize_t swap_index; index=(-1); swap_index=(-2); if (*option != '+') { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(argv[i+1],&geometry_info); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(*images,index); q=GetImageFromList(*images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",(*images)->filename); status=MagickFalse; break; } if (p == q) break; u=CloneImage(p,0,0,MagickTrue,exception); if (u == (Image *) NULL) break; v=CloneImage(q,0,0,MagickTrue,exception); if (v == (Image *) NULL) { u=DestroyImage(u); break; } ReplaceImageInList(&p,v); ReplaceImageInList(&q,u); *images=GetFirstImageInList(q); break; } break; } case 'w': { if (LocaleCompare("write",option+1) == 0) { char key[MaxTextExtent]; Image *write_images; ImageInfo *write_info; (void) SyncImagesSettings(mogrify_info,*images); (void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]); (void) DeleteImageRegistry(key); write_images=(*images); if (*option == '+') write_images=CloneImageList(*images,exception); write_info=CloneImageInfo(mogrify_info); status&=WriteImages(write_info,write_images,argv[i+1],exception); write_info=DestroyImageInfo(write_info); if (*option == '+') write_images=DestroyImageList(write_images); break; } break; } default: break; } i+=count; } quantize_info=DestroyQuantizeInfo(quantize_info); mogrify_info=DestroyImageInfo(mogrify_info); status&=MogrifyImageInfo(image_info,argc,argv,exception); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImages() applies image processing options to a sequence of images as % prescribed by command line options. 
% % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImages(ImageInfo *image_info, % const MagickBooleanType post,const int argc,const char **argv, % Image **images,Exceptioninfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o post: If true, post process image list operators otherwise pre-process. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to a pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info, const MagickBooleanType post,const int argc,const char **argv, Image **images,ExceptionInfo *exception) { #define MogrifyImageTag "Mogrify/Image" MagickStatusType status; MagickBooleanType proceed; size_t n; register ssize_t i; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (images == (Image **) NULL) return(MogrifyImage(image_info,argc,argv,images,exception)); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); (void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL, (void *) NULL); status=MagickTrue; #if 0 (void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc, post?"post":"pre"); #endif /* Pre-process multi-image sequence operators */ if (post == MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); /* For each image, process simple single image operators */ i=0; n=GetImageListLength(*images); for (;;) { #if 0 (void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif status&=MogrifyImage(image_info,argc,argv,images,exception); proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i, n); if (proceed == MagickFalse) break; if ((*images)->next == (Image *) NULL) break; *images=(*images)->next; i++; } assert(*images != (Image *) NULL); #if 0 (void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif /* Post-process, multi-image sequence operators */ *images=GetFirstImageInList(*images); if (post != MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); return(status != 0 ? MagickTrue : MagickFalse); }
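/*
  Editor's addition: a minimal, hypothetical usage sketch of the
  MogrifyImages() entry point documented above.  It is guarded with
  "#if 0" because it is illustrative only; the helper names
  (AcquireImageInfo, AcquireExceptionInfo, ReadImage, WriteImages, and so
  on) and the "-resize 50%" option list are assumptions drawn from the
  usual MagickCore/MagickWand API and headers, not from this file.
*/
#if 0
#include <wand/MagickWand.h>

static MagickBooleanType ShrinkInPlace(const char *filename)
{
  const char
    *option_argv[] = { "mogrify", "-resize", "50%" };

  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
  images=ReadImage(image_info,exception);
  if (images == (Image *) NULL)
    {
      image_info=DestroyImageInfo(image_info);
      exception=DestroyExceptionInfo(exception);
      return(MagickFalse);
    }
  /*
    post=MagickFalse: list operators are applied as a pre-process, then the
    per-image options, as described in the comment above.
  */
  status=MogrifyImages(image_info,MagickFalse,3,option_argv,&images,exception);
  if (status != MagickFalse)
    status=WriteImages(image_info,images,filename,exception);
  images=DestroyImageList(images);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  return(status);
}
#endif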
./CrossVul/dataset_final_sorted/CWE-399/c/good_945_0
crossvul-cpp_data_good_2297_1
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: funcs.c,v 1.73 2014/09/10 18:41:51 christos Exp $") #endif /* lint */ #include "magic.h" #include <assert.h> #include <stdarg.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #if defined(HAVE_WCHAR_H) #include <wchar.h> #endif #if defined(HAVE_WCTYPE_H) #include <wctype.h> #endif #if defined(HAVE_LIMITS_H) #include <limits.h> #endif #ifndef SIZE_MAX #define SIZE_MAX ((size_t)~0) #endif /* * Like printf, only we append to a buffer. */ protected int file_vprintf(struct magic_set *ms, const char *fmt, va_list ap) { int len; char *buf, *newstr; if (ms->event_flags & EVENT_HAD_ERR) return 0; len = vasprintf(&buf, fmt, ap); if (len < 0) goto out; if (ms->o.buf != NULL) { len = asprintf(&newstr, "%s%s", ms->o.buf, buf); free(buf); if (len < 0) goto out; free(ms->o.buf); buf = newstr; } ms->o.buf = buf; return 0; out: file_error(ms, errno, "vasprintf failed"); return -1; } protected int file_printf(struct magic_set *ms, const char *fmt, ...) { int rv; va_list ap; va_start(ap, fmt); rv = file_vprintf(ms, fmt, ap); va_end(ap); return rv; } /* * error - print best error message possible */ /*VARARGS*/ __attribute__((__format__(__printf__, 3, 0))) private void file_error_core(struct magic_set *ms, int error, const char *f, va_list va, size_t lineno) { /* Only the first error is ok */ if (ms->event_flags & EVENT_HAD_ERR) return; if (lineno != 0) { free(ms->o.buf); ms->o.buf = NULL; file_printf(ms, "line %" SIZE_T_FORMAT "u: ", lineno); } file_vprintf(ms, f, va); if (error > 0) file_printf(ms, " (%s)", strerror(error)); ms->event_flags |= EVENT_HAD_ERR; ms->error = error; } /*VARARGS*/ protected void file_error(struct magic_set *ms, int error, const char *f, ...) { va_list va; va_start(va, f); file_error_core(ms, error, f, va, 0); va_end(va); } /* * Print an error with magic line number. */ /*VARARGS*/ protected void file_magerror(struct magic_set *ms, const char *f, ...) 
{ va_list va; va_start(va, f); file_error_core(ms, 0, f, va, ms->line); va_end(va); } protected void file_oomem(struct magic_set *ms, size_t len) { file_error(ms, errno, "cannot allocate %" SIZE_T_FORMAT "u bytes", len); } protected void file_badseek(struct magic_set *ms) { file_error(ms, errno, "error seeking"); } protected void file_badread(struct magic_set *ms) { file_error(ms, errno, "error reading"); } #ifndef COMPILE_ONLY protected int file_buffer(struct magic_set *ms, int fd, const char *inname __attribute__ ((unused)), const void *buf, size_t nb) { int m = 0, rv = 0, looks_text = 0; int mime = ms->flags & MAGIC_MIME; const unsigned char *ubuf = CAST(const unsigned char *, buf); unichar *u8buf = NULL; size_t ulen; const char *code = NULL; const char *code_mime = "binary"; const char *type = "application/octet-stream"; const char *def = "data"; const char *ftype = NULL; if (nb == 0) { def = "empty"; type = "application/x-empty"; goto simple; } else if (nb == 1) { def = "very short file (no magic)"; goto simple; } if ((ms->flags & MAGIC_NO_CHECK_ENCODING) == 0) { looks_text = file_encoding(ms, ubuf, nb, &u8buf, &ulen, &code, &code_mime, &ftype); } #ifdef __EMX__ if ((ms->flags & MAGIC_NO_CHECK_APPTYPE) == 0 && inname) { switch (file_os2_apptype(ms, inname, buf, nb)) { case -1: return -1; case 0: break; default: return 1; } } #endif #if HAVE_FORK /* try compression stuff */ if ((ms->flags & MAGIC_NO_CHECK_COMPRESS) == 0) if ((m = file_zmagic(ms, fd, inname, ubuf, nb)) != 0) { if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "zmagic %d\n", m); goto done_encoding; } #endif /* Check if we have a tar file */ if ((ms->flags & MAGIC_NO_CHECK_TAR) == 0) if ((m = file_is_tar(ms, ubuf, nb)) != 0) { if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "tar %d\n", m); goto done; } /* Check if we have a CDF file */ if ((ms->flags & MAGIC_NO_CHECK_CDF) == 0) if ((m = file_trycdf(ms, fd, ubuf, nb)) != 0) { if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "cdf %d\n", m); goto done; } /* try soft magic tests */ if ((ms->flags & MAGIC_NO_CHECK_SOFT) == 0) if ((m = file_softmagic(ms, ubuf, nb, 0, BINTEST, looks_text)) != 0) { if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "softmagic %d\n", m); #ifdef BUILTIN_ELF if ((ms->flags & MAGIC_NO_CHECK_ELF) == 0 && m == 1 && nb > 5 && fd != -1) { /* * We matched something in the file, so this * *might* be an ELF file, and the file is at * least 5 bytes long, so if it's an ELF file * it has at least one byte past the ELF magic * number - try extracting information from the * ELF headers that cannot easily * be * extracted with rules in the magic file. */ if ((m = file_tryelf(ms, fd, ubuf, nb)) != 0) if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "elf %d\n", m); } #endif goto done; } /* try text properties */ if ((ms->flags & MAGIC_NO_CHECK_TEXT) == 0) { if ((m = file_ascmagic(ms, ubuf, nb, looks_text)) != 0) { if ((ms->flags & MAGIC_DEBUG) != 0) (void)fprintf(stderr, "ascmagic %d\n", m); goto done; } } simple: /* give up */ m = 1; if ((!mime || (mime & MAGIC_MIME_TYPE)) && file_printf(ms, "%s", mime ? 
type : def) == -1) { rv = -1; } done: if ((ms->flags & MAGIC_MIME_ENCODING) != 0) { if (ms->flags & MAGIC_MIME_TYPE) if (file_printf(ms, "; charset=") == -1) rv = -1; if (file_printf(ms, "%s", code_mime) == -1) rv = -1; } #if HAVE_FORK done_encoding: #endif free(u8buf); if (rv) return rv; return m; } #endif protected int file_reset(struct magic_set *ms) { if (ms->mlist[0] == NULL) { file_error(ms, 0, "no magic files loaded"); return -1; } if (ms->o.buf) { free(ms->o.buf); ms->o.buf = NULL; } if (ms->o.pbuf) { free(ms->o.pbuf); ms->o.pbuf = NULL; } ms->event_flags &= ~EVENT_HAD_ERR; ms->error = -1; return 0; } #define OCTALIFY(n, o) \ /*LINTED*/ \ (void)(*(n)++ = '\\', \ *(n)++ = (((uint32_t)*(o) >> 6) & 3) + '0', \ *(n)++ = (((uint32_t)*(o) >> 3) & 7) + '0', \ *(n)++ = (((uint32_t)*(o) >> 0) & 7) + '0', \ (o)++) protected const char * file_getbuffer(struct magic_set *ms) { char *pbuf, *op, *np; size_t psize, len; if (ms->event_flags & EVENT_HAD_ERR) return NULL; if (ms->flags & MAGIC_RAW) return ms->o.buf; if (ms->o.buf == NULL) return NULL; /* * 4 is for octal representation, + 1 is for NUL */ len = strlen(ms->o.buf); if (len > (SIZE_MAX - 1) / 4) { file_oomem(ms, len); return NULL; } psize = len * 4 + 1; if ((pbuf = CAST(char *, realloc(ms->o.pbuf, psize))) == NULL) { file_oomem(ms, psize); return NULL; } ms->o.pbuf = pbuf; #if defined(HAVE_WCHAR_H) && defined(HAVE_MBRTOWC) && defined(HAVE_WCWIDTH) { mbstate_t state; wchar_t nextchar; int mb_conv = 1; size_t bytesconsumed; char *eop; (void)memset(&state, 0, sizeof(mbstate_t)); np = ms->o.pbuf; op = ms->o.buf; eop = op + len; while (op < eop) { bytesconsumed = mbrtowc(&nextchar, op, (size_t)(eop - op), &state); if (bytesconsumed == (size_t)(-1) || bytesconsumed == (size_t)(-2)) { mb_conv = 0; break; } if (iswprint(nextchar)) { (void)memcpy(np, op, bytesconsumed); op += bytesconsumed; np += bytesconsumed; } else { while (bytesconsumed-- > 0) OCTALIFY(np, op); } } *np = '\0'; /* Parsing succeeded as a multi-byte sequence */ if (mb_conv != 0) return ms->o.pbuf; } #endif for (np = ms->o.pbuf, op = ms->o.buf; *op;) { if (isprint((unsigned char)*op)) { *np++ = *op++; } else { OCTALIFY(np, op); } } *np = '\0'; return ms->o.pbuf; } protected int file_check_mem(struct magic_set *ms, unsigned int level) { size_t len; if (level >= ms->c.len) { len = (ms->c.len += 20) * sizeof(*ms->c.li); ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ? malloc(len) : realloc(ms->c.li, len)); if (ms->c.li == NULL) { file_oomem(ms, len); return -1; } } ms->c.li[level].got_match = 0; #ifdef ENABLE_CONDITIONALS ms->c.li[level].last_match = 0; ms->c.li[level].last_cond = COND_NONE; #endif /* ENABLE_CONDITIONALS */ return 0; } protected size_t file_printedlen(const struct magic_set *ms) { return ms->o.buf == NULL ? 0 : strlen(ms->o.buf); } protected int file_replace(struct magic_set *ms, const char *pat, const char *rep) { file_regex_t rx; int rc, rv = -1; rc = file_regcomp(&rx, pat, REG_EXTENDED); if (rc) { file_regerror(&rx, rc, ms); } else { regmatch_t rm; int nm = 0; while (file_regexec(&rx, ms->o.buf, 1, &rm, 0) == 0) { ms->o.buf[rm.rm_so] = '\0'; if (file_printf(ms, "%s%s", rep, rm.rm_eo != 0 ? 
ms->o.buf + rm.rm_eo : "") == -1) goto out; nm++; } rv = nm; } out: file_regfree(&rx); return rv; } protected int file_regcomp(file_regex_t *rx, const char *pat, int flags) { #ifdef USE_C_LOCALE rx->c_lc_ctype = newlocale(LC_CTYPE_MASK, "C", 0); assert(rx->c_lc_ctype != NULL); rx->old_lc_ctype = uselocale(rx->c_lc_ctype); assert(rx->old_lc_ctype != NULL); #endif rx->pat = pat; return rx->rc = regcomp(&rx->rx, pat, flags); } protected int file_regexec(file_regex_t *rx, const char *str, size_t nmatch, regmatch_t* pmatch, int eflags) { assert(rx->rc == 0); return regexec(&rx->rx, str, nmatch, pmatch, eflags); } protected void file_regfree(file_regex_t *rx) { if (rx->rc == 0) regfree(&rx->rx); #ifdef USE_C_LOCALE (void)uselocale(rx->old_lc_ctype); freelocale(rx->c_lc_ctype); #endif } protected void file_regerror(file_regex_t *rx, int rc, struct magic_set *ms) { char errmsg[512]; (void)regerror(rc, &rx->rx, errmsg, sizeof(errmsg)); file_magerror(ms, "regex error %d for `%s', (%s)", rc, rx->pat, errmsg); } protected file_pushbuf_t * file_push_buffer(struct magic_set *ms) { file_pushbuf_t *pb; if (ms->event_flags & EVENT_HAD_ERR) return NULL; if ((pb = (CAST(file_pushbuf_t *, malloc(sizeof(*pb))))) == NULL) return NULL; pb->buf = ms->o.buf; pb->offset = ms->offset; ms->o.buf = NULL; ms->offset = 0; return pb; } protected char * file_pop_buffer(struct magic_set *ms, file_pushbuf_t *pb) { char *rbuf; if (ms->event_flags & EVENT_HAD_ERR) { free(pb->buf); free(pb); return NULL; } rbuf = ms->o.buf; ms->o.buf = pb->buf; ms->offset = pb->offset; free(pb); return rbuf; }
./CrossVul/dataset_final_sorted/CWE-399/c/good_2297_1
crossvul-cpp_data_bad_5367_0
/* $OpenBSD: kex.c,v 1.126 2016/09/28 21:44:52 djm Exp $ */ /* * Copyright (c) 2000, 2001 Markus Friedl. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "includes.h" #include <signal.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef WITH_OPENSSL #include <openssl/crypto.h> #include <openssl/dh.h> #endif #include "ssh2.h" #include "packet.h" #include "compat.h" #include "cipher.h" #include "sshkey.h" #include "kex.h" #include "log.h" #include "mac.h" #include "match.h" #include "misc.h" #include "dispatch.h" #include "monitor.h" #include "ssherr.h" #include "sshbuf.h" #include "digest.h" #if OPENSSL_VERSION_NUMBER >= 0x00907000L # if defined(HAVE_EVP_SHA256) # define evp_ssh_sha256 EVP_sha256 # else extern const EVP_MD *evp_ssh_sha256(void); # endif #endif /* prototype */ static int kex_choose_conf(struct ssh *); static int kex_input_newkeys(int, u_int32_t, void *); static const char *proposal_names[PROPOSAL_MAX] = { "KEX algorithms", "host key algorithms", "ciphers ctos", "ciphers stoc", "MACs ctos", "MACs stoc", "compression ctos", "compression stoc", "languages ctos", "languages stoc", }; struct kexalg { char *name; u_int type; int ec_nid; int hash_alg; }; static const struct kexalg kexalgs[] = { #ifdef WITH_OPENSSL { KEX_DH1, KEX_DH_GRP1_SHA1, 0, SSH_DIGEST_SHA1 }, { KEX_DH14_SHA1, KEX_DH_GRP14_SHA1, 0, SSH_DIGEST_SHA1 }, { KEX_DH14_SHA256, KEX_DH_GRP14_SHA256, 0, SSH_DIGEST_SHA256 }, { KEX_DH16_SHA512, KEX_DH_GRP16_SHA512, 0, SSH_DIGEST_SHA512 }, { KEX_DH18_SHA512, KEX_DH_GRP18_SHA512, 0, SSH_DIGEST_SHA512 }, { KEX_DHGEX_SHA1, KEX_DH_GEX_SHA1, 0, SSH_DIGEST_SHA1 }, #ifdef HAVE_EVP_SHA256 { KEX_DHGEX_SHA256, KEX_DH_GEX_SHA256, 0, SSH_DIGEST_SHA256 }, #endif /* HAVE_EVP_SHA256 */ #ifdef OPENSSL_HAS_ECC { KEX_ECDH_SHA2_NISTP256, KEX_ECDH_SHA2, NID_X9_62_prime256v1, SSH_DIGEST_SHA256 }, { KEX_ECDH_SHA2_NISTP384, KEX_ECDH_SHA2, NID_secp384r1, SSH_DIGEST_SHA384 }, # ifdef OPENSSL_HAS_NISTP521 { KEX_ECDH_SHA2_NISTP521, KEX_ECDH_SHA2, NID_secp521r1, SSH_DIGEST_SHA512 }, # endif /* OPENSSL_HAS_NISTP521 */ #endif /* OPENSSL_HAS_ECC */ #endif /* WITH_OPENSSL */ #if defined(HAVE_EVP_SHA256) || !defined(WITH_OPENSSL) { KEX_CURVE25519_SHA256, KEX_C25519_SHA256, 0, SSH_DIGEST_SHA256 }, { KEX_CURVE25519_SHA256_OLD, KEX_C25519_SHA256, 0, SSH_DIGEST_SHA256 }, #endif /* 
HAVE_EVP_SHA256 || !WITH_OPENSSL */ { NULL, -1, -1, -1}, }; char * kex_alg_list(char sep) { char *ret = NULL, *tmp; size_t nlen, rlen = 0; const struct kexalg *k; for (k = kexalgs; k->name != NULL; k++) { if (ret != NULL) ret[rlen++] = sep; nlen = strlen(k->name); if ((tmp = realloc(ret, rlen + nlen + 2)) == NULL) { free(ret); return NULL; } ret = tmp; memcpy(ret + rlen, k->name, nlen + 1); rlen += nlen; } return ret; } static const struct kexalg * kex_alg_by_name(const char *name) { const struct kexalg *k; for (k = kexalgs; k->name != NULL; k++) { if (strcmp(k->name, name) == 0) return k; } return NULL; } /* Validate KEX method name list */ int kex_names_valid(const char *names) { char *s, *cp, *p; if (names == NULL || strcmp(names, "") == 0) return 0; if ((s = cp = strdup(names)) == NULL) return 0; for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) { if (kex_alg_by_name(p) == NULL) { error("Unsupported KEX algorithm \"%.100s\"", p); free(s); return 0; } } debug3("kex names ok: [%s]", names); free(s); return 1; } /* * Concatenate algorithm names, avoiding duplicates in the process. * Caller must free returned string. */ char * kex_names_cat(const char *a, const char *b) { char *ret = NULL, *tmp = NULL, *cp, *p; size_t len; if (a == NULL || *a == '\0') return NULL; if (b == NULL || *b == '\0') return strdup(a); if (strlen(b) > 1024*1024) return NULL; len = strlen(a) + strlen(b) + 2; if ((tmp = cp = strdup(b)) == NULL || (ret = calloc(1, len)) == NULL) { free(tmp); return NULL; } strlcpy(ret, a, len); for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) { if (match_list(ret, p, NULL) != NULL) continue; /* Algorithm already present */ if (strlcat(ret, ",", len) >= len || strlcat(ret, p, len) >= len) { free(tmp); free(ret); return NULL; /* Shouldn't happen */ } } free(tmp); return ret; } /* * Assemble a list of algorithms from a default list and a string from a * configuration file. The user-provided string may begin with '+' to * indicate that it should be appended to the default. 
*/ int kex_assemble_names(const char *def, char **list) { char *ret; if (list == NULL || *list == NULL || **list == '\0') { *list = strdup(def); return 0; } if (**list != '+') { return 0; } if ((ret = kex_names_cat(def, *list + 1)) == NULL) return SSH_ERR_ALLOC_FAIL; free(*list); *list = ret; return 0; } /* put algorithm proposal into buffer */ int kex_prop2buf(struct sshbuf *b, char *proposal[PROPOSAL_MAX]) { u_int i; int r; sshbuf_reset(b); /* * add a dummy cookie, the cookie will be overwritten by * kex_send_kexinit(), each time a kexinit is set */ for (i = 0; i < KEX_COOKIE_LEN; i++) { if ((r = sshbuf_put_u8(b, 0)) != 0) return r; } for (i = 0; i < PROPOSAL_MAX; i++) { if ((r = sshbuf_put_cstring(b, proposal[i])) != 0) return r; } if ((r = sshbuf_put_u8(b, 0)) != 0 || /* first_kex_packet_follows */ (r = sshbuf_put_u32(b, 0)) != 0) /* uint32 reserved */ return r; return 0; } /* parse buffer and return algorithm proposal */ int kex_buf2prop(struct sshbuf *raw, int *first_kex_follows, char ***propp) { struct sshbuf *b = NULL; u_char v; u_int i; char **proposal = NULL; int r; *propp = NULL; if ((proposal = calloc(PROPOSAL_MAX, sizeof(char *))) == NULL) return SSH_ERR_ALLOC_FAIL; if ((b = sshbuf_fromb(raw)) == NULL) { r = SSH_ERR_ALLOC_FAIL; goto out; } if ((r = sshbuf_consume(b, KEX_COOKIE_LEN)) != 0) /* skip cookie */ goto out; /* extract kex init proposal strings */ for (i = 0; i < PROPOSAL_MAX; i++) { if ((r = sshbuf_get_cstring(b, &(proposal[i]), NULL)) != 0) goto out; debug2("%s: %s", proposal_names[i], proposal[i]); } /* first kex follows / reserved */ if ((r = sshbuf_get_u8(b, &v)) != 0 || /* first_kex_follows */ (r = sshbuf_get_u32(b, &i)) != 0) /* reserved */ goto out; if (first_kex_follows != NULL) *first_kex_follows = v; debug2("first_kex_follows %d ", v); debug2("reserved %u ", i); r = 0; *propp = proposal; out: if (r != 0 && proposal != NULL) kex_prop_free(proposal); sshbuf_free(b); return r; } void kex_prop_free(char **proposal) { u_int i; if (proposal == NULL) return; for (i = 0; i < PROPOSAL_MAX; i++) free(proposal[i]); free(proposal); } /* ARGSUSED */ static int kex_protocol_error(int type, u_int32_t seq, void *ctxt) { struct ssh *ssh = active_state; /* XXX */ int r; error("kex protocol error: type %d seq %u", type, seq); if ((r = sshpkt_start(ssh, SSH2_MSG_UNIMPLEMENTED)) != 0 || (r = sshpkt_put_u32(ssh, seq)) != 0 || (r = sshpkt_send(ssh)) != 0) return r; return 0; } static void kex_reset_dispatch(struct ssh *ssh) { ssh_dispatch_range(ssh, SSH2_MSG_TRANSPORT_MIN, SSH2_MSG_TRANSPORT_MAX, &kex_protocol_error); ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, &kex_input_kexinit); } static int kex_send_ext_info(struct ssh *ssh) { int r; char *algs; if ((algs = sshkey_alg_list(0, 1, ',')) == NULL) return SSH_ERR_ALLOC_FAIL; if ((r = sshpkt_start(ssh, SSH2_MSG_EXT_INFO)) != 0 || (r = sshpkt_put_u32(ssh, 1)) != 0 || (r = sshpkt_put_cstring(ssh, "server-sig-algs")) != 0 || (r = sshpkt_put_cstring(ssh, algs)) != 0 || (r = sshpkt_send(ssh)) != 0) goto out; /* success */ r = 0; out: free(algs); return r; } int kex_send_newkeys(struct ssh *ssh) { int r; kex_reset_dispatch(ssh); if ((r = sshpkt_start(ssh, SSH2_MSG_NEWKEYS)) != 0 || (r = sshpkt_send(ssh)) != 0) return r; debug("SSH2_MSG_NEWKEYS sent"); debug("expecting SSH2_MSG_NEWKEYS"); ssh_dispatch_set(ssh, SSH2_MSG_NEWKEYS, &kex_input_newkeys); if (ssh->kex->ext_info_c) if ((r = kex_send_ext_info(ssh)) != 0) return r; return 0; } int kex_input_ext_info(int type, u_int32_t seq, void *ctxt) { struct ssh *ssh = ctxt; struct kex *kex = 
ssh->kex; u_int32_t i, ninfo; char *name, *val, *found; int r; debug("SSH2_MSG_EXT_INFO received"); ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &kex_protocol_error); if ((r = sshpkt_get_u32(ssh, &ninfo)) != 0) return r; for (i = 0; i < ninfo; i++) { if ((r = sshpkt_get_cstring(ssh, &name, NULL)) != 0) return r; if ((r = sshpkt_get_cstring(ssh, &val, NULL)) != 0) { free(name); return r; } debug("%s: %s=<%s>", __func__, name, val); if (strcmp(name, "server-sig-algs") == 0) { found = match_list("rsa-sha2-256", val, NULL); if (found) { kex->rsa_sha2 = 256; free(found); } found = match_list("rsa-sha2-512", val, NULL); if (found) { kex->rsa_sha2 = 512; free(found); } } free(name); free(val); } return sshpkt_get_end(ssh); } static int kex_input_newkeys(int type, u_int32_t seq, void *ctxt) { struct ssh *ssh = ctxt; struct kex *kex = ssh->kex; int r; debug("SSH2_MSG_NEWKEYS received"); ssh_dispatch_set(ssh, SSH2_MSG_NEWKEYS, &kex_protocol_error); if ((r = sshpkt_get_end(ssh)) != 0) return r; if ((r = ssh_set_newkeys(ssh, MODE_IN)) != 0) return r; kex->done = 1; sshbuf_reset(kex->peer); /* sshbuf_reset(kex->my); */ kex->flags &= ~KEX_INIT_SENT; free(kex->name); kex->name = NULL; return 0; } int kex_send_kexinit(struct ssh *ssh) { u_char *cookie; struct kex *kex = ssh->kex; int r; if (kex == NULL) return SSH_ERR_INTERNAL_ERROR; if (kex->flags & KEX_INIT_SENT) return 0; kex->done = 0; /* generate a random cookie */ if (sshbuf_len(kex->my) < KEX_COOKIE_LEN) return SSH_ERR_INVALID_FORMAT; if ((cookie = sshbuf_mutable_ptr(kex->my)) == NULL) return SSH_ERR_INTERNAL_ERROR; arc4random_buf(cookie, KEX_COOKIE_LEN); if ((r = sshpkt_start(ssh, SSH2_MSG_KEXINIT)) != 0 || (r = sshpkt_putb(ssh, kex->my)) != 0 || (r = sshpkt_send(ssh)) != 0) return r; debug("SSH2_MSG_KEXINIT sent"); kex->flags |= KEX_INIT_SENT; return 0; } /* ARGSUSED */ int kex_input_kexinit(int type, u_int32_t seq, void *ctxt) { struct ssh *ssh = ctxt; struct kex *kex = ssh->kex; const u_char *ptr; u_int i; size_t dlen; int r; debug("SSH2_MSG_KEXINIT received"); if (kex == NULL) return SSH_ERR_INVALID_ARGUMENT; ptr = sshpkt_ptr(ssh, &dlen); if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0) return r; /* discard packet */ for (i = 0; i < KEX_COOKIE_LEN; i++) if ((r = sshpkt_get_u8(ssh, NULL)) != 0) return r; for (i = 0; i < PROPOSAL_MAX; i++) if ((r = sshpkt_get_string(ssh, NULL, NULL)) != 0) return r; /* * XXX RFC4253 sec 7: "each side MAY guess" - currently no supported * KEX method has the server move first, but a server might be using * a custom method or one that we otherwise don't support. We should * be prepared to remember first_kex_follows here so we can eat a * packet later. * XXX2 - RFC4253 is kind of ambiguous on what first_kex_follows means * for cases where the server *doesn't* go first. I guess we should * ignore it when it is set for these cases, which is what we do now. 
*/ if ((r = sshpkt_get_u8(ssh, NULL)) != 0 || /* first_kex_follows */ (r = sshpkt_get_u32(ssh, NULL)) != 0 || /* reserved */ (r = sshpkt_get_end(ssh)) != 0) return r; if (!(kex->flags & KEX_INIT_SENT)) if ((r = kex_send_kexinit(ssh)) != 0) return r; if ((r = kex_choose_conf(ssh)) != 0) return r; if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL) return (kex->kex[kex->kex_type])(ssh); return SSH_ERR_INTERNAL_ERROR; } int kex_new(struct ssh *ssh, char *proposal[PROPOSAL_MAX], struct kex **kexp) { struct kex *kex; int r; *kexp = NULL; if ((kex = calloc(1, sizeof(*kex))) == NULL) return SSH_ERR_ALLOC_FAIL; if ((kex->peer = sshbuf_new()) == NULL || (kex->my = sshbuf_new()) == NULL) { r = SSH_ERR_ALLOC_FAIL; goto out; } if ((r = kex_prop2buf(kex->my, proposal)) != 0) goto out; kex->done = 0; kex_reset_dispatch(ssh); r = 0; *kexp = kex; out: if (r != 0) kex_free(kex); return r; } void kex_free_newkeys(struct newkeys *newkeys) { if (newkeys == NULL) return; if (newkeys->enc.key) { explicit_bzero(newkeys->enc.key, newkeys->enc.key_len); free(newkeys->enc.key); newkeys->enc.key = NULL; } if (newkeys->enc.iv) { explicit_bzero(newkeys->enc.iv, newkeys->enc.iv_len); free(newkeys->enc.iv); newkeys->enc.iv = NULL; } free(newkeys->enc.name); explicit_bzero(&newkeys->enc, sizeof(newkeys->enc)); free(newkeys->comp.name); explicit_bzero(&newkeys->comp, sizeof(newkeys->comp)); mac_clear(&newkeys->mac); if (newkeys->mac.key) { explicit_bzero(newkeys->mac.key, newkeys->mac.key_len); free(newkeys->mac.key); newkeys->mac.key = NULL; } free(newkeys->mac.name); explicit_bzero(&newkeys->mac, sizeof(newkeys->mac)); explicit_bzero(newkeys, sizeof(*newkeys)); free(newkeys); } void kex_free(struct kex *kex) { u_int mode; #ifdef WITH_OPENSSL if (kex->dh) DH_free(kex->dh); #ifdef OPENSSL_HAS_ECC if (kex->ec_client_key) EC_KEY_free(kex->ec_client_key); #endif /* OPENSSL_HAS_ECC */ #endif /* WITH_OPENSSL */ for (mode = 0; mode < MODE_MAX; mode++) { kex_free_newkeys(kex->newkeys[mode]); kex->newkeys[mode] = NULL; } sshbuf_free(kex->peer); sshbuf_free(kex->my); free(kex->session_id); free(kex->client_version_string); free(kex->server_version_string); free(kex->failed_choice); free(kex->hostkey_alg); free(kex->name); free(kex); } int kex_setup(struct ssh *ssh, char *proposal[PROPOSAL_MAX]) { int r; if ((r = kex_new(ssh, proposal, &ssh->kex)) != 0) return r; if ((r = kex_send_kexinit(ssh)) != 0) { /* we start */ kex_free(ssh->kex); ssh->kex = NULL; return r; } return 0; } /* * Request key re-exchange, returns 0 on success or a ssherr.h error * code otherwise. Must not be called if KEX is incomplete or in-progress. 
*/ int kex_start_rekex(struct ssh *ssh) { if (ssh->kex == NULL) { error("%s: no kex", __func__); return SSH_ERR_INTERNAL_ERROR; } if (ssh->kex->done == 0) { error("%s: requested twice", __func__); return SSH_ERR_INTERNAL_ERROR; } ssh->kex->done = 0; return kex_send_kexinit(ssh); } static int choose_enc(struct sshenc *enc, char *client, char *server) { char *name = match_list(client, server, NULL); if (name == NULL) return SSH_ERR_NO_CIPHER_ALG_MATCH; if ((enc->cipher = cipher_by_name(name)) == NULL) return SSH_ERR_INTERNAL_ERROR; enc->name = name; enc->enabled = 0; enc->iv = NULL; enc->iv_len = cipher_ivlen(enc->cipher); enc->key = NULL; enc->key_len = cipher_keylen(enc->cipher); enc->block_size = cipher_blocksize(enc->cipher); return 0; } static int choose_mac(struct ssh *ssh, struct sshmac *mac, char *client, char *server) { char *name = match_list(client, server, NULL); if (name == NULL) return SSH_ERR_NO_MAC_ALG_MATCH; if (mac_setup(mac, name) < 0) return SSH_ERR_INTERNAL_ERROR; /* truncate the key */ if (ssh->compat & SSH_BUG_HMAC) mac->key_len = 16; mac->name = name; mac->key = NULL; mac->enabled = 0; return 0; } static int choose_comp(struct sshcomp *comp, char *client, char *server) { char *name = match_list(client, server, NULL); if (name == NULL) return SSH_ERR_NO_COMPRESS_ALG_MATCH; if (strcmp(name, "zlib@openssh.com") == 0) { comp->type = COMP_DELAYED; } else if (strcmp(name, "zlib") == 0) { comp->type = COMP_ZLIB; } else if (strcmp(name, "none") == 0) { comp->type = COMP_NONE; } else { return SSH_ERR_INTERNAL_ERROR; } comp->name = name; return 0; } static int choose_kex(struct kex *k, char *client, char *server) { const struct kexalg *kexalg; k->name = match_list(client, server, NULL); debug("kex: algorithm: %s", k->name ? k->name : "(no match)"); if (k->name == NULL) return SSH_ERR_NO_KEX_ALG_MATCH; if ((kexalg = kex_alg_by_name(k->name)) == NULL) return SSH_ERR_INTERNAL_ERROR; k->kex_type = kexalg->type; k->hash_alg = kexalg->hash_alg; k->ec_nid = kexalg->ec_nid; return 0; } static int choose_hostkeyalg(struct kex *k, char *client, char *server) { k->hostkey_alg = match_list(client, server, NULL); debug("kex: host key algorithm: %s", k->hostkey_alg ? k->hostkey_alg : "(no match)"); if (k->hostkey_alg == NULL) return SSH_ERR_NO_HOSTKEY_ALG_MATCH; k->hostkey_type = sshkey_type_from_name(k->hostkey_alg); if (k->hostkey_type == KEY_UNSPEC) return SSH_ERR_INTERNAL_ERROR; k->hostkey_nid = sshkey_ecdsa_nid_from_name(k->hostkey_alg); return 0; } static int proposals_match(char *my[PROPOSAL_MAX], char *peer[PROPOSAL_MAX]) { static int check[] = { PROPOSAL_KEX_ALGS, PROPOSAL_SERVER_HOST_KEY_ALGS, -1 }; int *idx; char *p; for (idx = &check[0]; *idx != -1; idx++) { if ((p = strchr(my[*idx], ',')) != NULL) *p = '\0'; if ((p = strchr(peer[*idx], ',')) != NULL) *p = '\0'; if (strcmp(my[*idx], peer[*idx]) != 0) { debug2("proposal mismatch: my %s peer %s", my[*idx], peer[*idx]); return (0); } } debug2("proposals match"); return (1); } static int kex_choose_conf(struct ssh *ssh) { struct kex *kex = ssh->kex; struct newkeys *newkeys; char **my = NULL, **peer = NULL; char **cprop, **sprop; int nenc, nmac, ncomp; u_int mode, ctos, need, dh_need, authlen; int r, first_kex_follows; debug2("local %s KEXINIT proposal", kex->server ? "server" : "client"); if ((r = kex_buf2prop(kex->my, NULL, &my)) != 0) goto out; debug2("peer %s KEXINIT proposal", kex->server ? 
"client" : "server"); if ((r = kex_buf2prop(kex->peer, &first_kex_follows, &peer)) != 0) goto out; if (kex->server) { cprop=peer; sprop=my; } else { cprop=my; sprop=peer; } /* Check whether client supports ext_info_c */ if (kex->server) { char *ext; ext = match_list("ext-info-c", peer[PROPOSAL_KEX_ALGS], NULL); kex->ext_info_c = (ext != NULL); free(ext); } /* Algorithm Negotiation */ if ((r = choose_kex(kex, cprop[PROPOSAL_KEX_ALGS], sprop[PROPOSAL_KEX_ALGS])) != 0) { kex->failed_choice = peer[PROPOSAL_KEX_ALGS]; peer[PROPOSAL_KEX_ALGS] = NULL; goto out; } if ((r = choose_hostkeyalg(kex, cprop[PROPOSAL_SERVER_HOST_KEY_ALGS], sprop[PROPOSAL_SERVER_HOST_KEY_ALGS])) != 0) { kex->failed_choice = peer[PROPOSAL_SERVER_HOST_KEY_ALGS]; peer[PROPOSAL_SERVER_HOST_KEY_ALGS] = NULL; goto out; } for (mode = 0; mode < MODE_MAX; mode++) { if ((newkeys = calloc(1, sizeof(*newkeys))) == NULL) { r = SSH_ERR_ALLOC_FAIL; goto out; } kex->newkeys[mode] = newkeys; ctos = (!kex->server && mode == MODE_OUT) || (kex->server && mode == MODE_IN); nenc = ctos ? PROPOSAL_ENC_ALGS_CTOS : PROPOSAL_ENC_ALGS_STOC; nmac = ctos ? PROPOSAL_MAC_ALGS_CTOS : PROPOSAL_MAC_ALGS_STOC; ncomp = ctos ? PROPOSAL_COMP_ALGS_CTOS : PROPOSAL_COMP_ALGS_STOC; if ((r = choose_enc(&newkeys->enc, cprop[nenc], sprop[nenc])) != 0) { kex->failed_choice = peer[nenc]; peer[nenc] = NULL; goto out; } authlen = cipher_authlen(newkeys->enc.cipher); /* ignore mac for authenticated encryption */ if (authlen == 0 && (r = choose_mac(ssh, &newkeys->mac, cprop[nmac], sprop[nmac])) != 0) { kex->failed_choice = peer[nmac]; peer[nmac] = NULL; goto out; } if ((r = choose_comp(&newkeys->comp, cprop[ncomp], sprop[ncomp])) != 0) { kex->failed_choice = peer[ncomp]; peer[ncomp] = NULL; goto out; } debug("kex: %s cipher: %s MAC: %s compression: %s", ctos ? "client->server" : "server->client", newkeys->enc.name, authlen == 0 ? newkeys->mac.name : "<implicit>", newkeys->comp.name); } need = dh_need = 0; for (mode = 0; mode < MODE_MAX; mode++) { newkeys = kex->newkeys[mode]; need = MAXIMUM(need, newkeys->enc.key_len); need = MAXIMUM(need, newkeys->enc.block_size); need = MAXIMUM(need, newkeys->enc.iv_len); need = MAXIMUM(need, newkeys->mac.key_len); dh_need = MAXIMUM(dh_need, cipher_seclen(newkeys->enc.cipher)); dh_need = MAXIMUM(dh_need, newkeys->enc.block_size); dh_need = MAXIMUM(dh_need, newkeys->enc.iv_len); dh_need = MAXIMUM(dh_need, newkeys->mac.key_len); } /* XXX need runden? 
*/ kex->we_need = need; kex->dh_need = dh_need; /* ignore the next message if the proposals do not match */ if (first_kex_follows && !proposals_match(my, peer) && !(ssh->compat & SSH_BUG_FIRSTKEX)) ssh->dispatch_skip_packets = 1; r = 0; out: kex_prop_free(my); kex_prop_free(peer); return r; } static int derive_key(struct ssh *ssh, int id, u_int need, u_char *hash, u_int hashlen, const struct sshbuf *shared_secret, u_char **keyp) { struct kex *kex = ssh->kex; struct ssh_digest_ctx *hashctx = NULL; char c = id; u_int have; size_t mdsz; u_char *digest; int r; if ((mdsz = ssh_digest_bytes(kex->hash_alg)) == 0) return SSH_ERR_INVALID_ARGUMENT; if ((digest = calloc(1, ROUNDUP(need, mdsz))) == NULL) { r = SSH_ERR_ALLOC_FAIL; goto out; } /* K1 = HASH(K || H || "A" || session_id) */ if ((hashctx = ssh_digest_start(kex->hash_alg)) == NULL || ssh_digest_update_buffer(hashctx, shared_secret) != 0 || ssh_digest_update(hashctx, hash, hashlen) != 0 || ssh_digest_update(hashctx, &c, 1) != 0 || ssh_digest_update(hashctx, kex->session_id, kex->session_id_len) != 0 || ssh_digest_final(hashctx, digest, mdsz) != 0) { r = SSH_ERR_LIBCRYPTO_ERROR; goto out; } ssh_digest_free(hashctx); hashctx = NULL; /* * expand key: * Kn = HASH(K || H || K1 || K2 || ... || Kn-1) * Key = K1 || K2 || ... || Kn */ for (have = mdsz; need > have; have += mdsz) { if ((hashctx = ssh_digest_start(kex->hash_alg)) == NULL || ssh_digest_update_buffer(hashctx, shared_secret) != 0 || ssh_digest_update(hashctx, hash, hashlen) != 0 || ssh_digest_update(hashctx, digest, have) != 0 || ssh_digest_final(hashctx, digest + have, mdsz) != 0) { r = SSH_ERR_LIBCRYPTO_ERROR; goto out; } ssh_digest_free(hashctx); hashctx = NULL; } #ifdef DEBUG_KEX fprintf(stderr, "key '%c'== ", c); dump_digest("key", digest, need); #endif *keyp = digest; digest = NULL; r = 0; out: free(digest); ssh_digest_free(hashctx); return r; } #define NKEYS 6 int kex_derive_keys(struct ssh *ssh, u_char *hash, u_int hashlen, const struct sshbuf *shared_secret) { struct kex *kex = ssh->kex; u_char *keys[NKEYS]; u_int i, j, mode, ctos; int r; for (i = 0; i < NKEYS; i++) { if ((r = derive_key(ssh, 'A'+i, kex->we_need, hash, hashlen, shared_secret, &keys[i])) != 0) { for (j = 0; j < i; j++) free(keys[j]); return r; } } for (mode = 0; mode < MODE_MAX; mode++) { ctos = (!kex->server && mode == MODE_OUT) || (kex->server && mode == MODE_IN); kex->newkeys[mode]->enc.iv = keys[ctos ? 0 : 1]; kex->newkeys[mode]->enc.key = keys[ctos ? 2 : 3]; kex->newkeys[mode]->mac.key = keys[ctos ? 
4 : 5]; } return 0; } #ifdef WITH_OPENSSL int kex_derive_keys_bn(struct ssh *ssh, u_char *hash, u_int hashlen, const BIGNUM *secret) { struct sshbuf *shared_secret; int r; if ((shared_secret = sshbuf_new()) == NULL) return SSH_ERR_ALLOC_FAIL; if ((r = sshbuf_put_bignum2(shared_secret, secret)) == 0) r = kex_derive_keys(ssh, hash, hashlen, shared_secret); sshbuf_free(shared_secret); return r; } #endif #ifdef WITH_SSH1 int derive_ssh1_session_id(BIGNUM *host_modulus, BIGNUM *server_modulus, u_int8_t cookie[8], u_int8_t id[16]) { u_int8_t hbuf[2048], sbuf[2048], obuf[SSH_DIGEST_MAX_LENGTH]; struct ssh_digest_ctx *hashctx = NULL; size_t hlen, slen; int r; hlen = BN_num_bytes(host_modulus); slen = BN_num_bytes(server_modulus); if (hlen < (512 / 8) || (u_int)hlen > sizeof(hbuf) || slen < (512 / 8) || (u_int)slen > sizeof(sbuf)) return SSH_ERR_KEY_BITS_MISMATCH; if (BN_bn2bin(host_modulus, hbuf) <= 0 || BN_bn2bin(server_modulus, sbuf) <= 0) { r = SSH_ERR_LIBCRYPTO_ERROR; goto out; } if ((hashctx = ssh_digest_start(SSH_DIGEST_MD5)) == NULL) { r = SSH_ERR_ALLOC_FAIL; goto out; } if (ssh_digest_update(hashctx, hbuf, hlen) != 0 || ssh_digest_update(hashctx, sbuf, slen) != 0 || ssh_digest_update(hashctx, cookie, 8) != 0 || ssh_digest_final(hashctx, obuf, sizeof(obuf)) != 0) { r = SSH_ERR_LIBCRYPTO_ERROR; goto out; } memcpy(id, obuf, ssh_digest_bytes(SSH_DIGEST_MD5)); r = 0; out: ssh_digest_free(hashctx); explicit_bzero(hbuf, sizeof(hbuf)); explicit_bzero(sbuf, sizeof(sbuf)); explicit_bzero(obuf, sizeof(obuf)); return r; } #endif #if defined(DEBUG_KEX) || defined(DEBUG_KEXDH) || defined(DEBUG_KEXECDH) void dump_digest(char *msg, u_char *digest, int len) { fprintf(stderr, "%s\n", msg); sshbuf_dump_data(digest, len, stderr); } #endif
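/*
 * Editor's addition (illustrative, not part of kex.c): a minimal,
 * self-contained sketch of the RFC 4253 key expansion that derive_key()
 * above implements, written against OpenSSL's SHA-256 instead of the
 * ssh_digest_* wrappers. The function name and the raw byte-string
 * parameters (the shared secret is taken as an already-encoded blob here)
 * are assumptions made for this example only.
 *
 *   K1  = HASH(K || H || c || session_id)
 *   Kn  = HASH(K || H || K1 || ... || Kn-1)
 *   key = K1 || K2 || ... , truncated to 'need' bytes by the caller
 */
#include <openssl/sha.h>
#include <stdlib.h>

static unsigned char *
expand_key_sha256(const unsigned char *K, size_t Klen,	/* shared secret */
    const unsigned char *H, size_t Hlen,		/* exchange hash */
    unsigned char c,					/* 'A'..'F' */
    const unsigned char *sid, size_t sidlen,		/* session id */
    size_t need)					/* bytes wanted */
{
	const size_t mdsz = SHA256_DIGEST_LENGTH;
	size_t have, alloc = ((need + mdsz - 1) / mdsz) * mdsz;
	unsigned char *digest;
	SHA256_CTX ctx;

	if ((digest = calloc(1, alloc ? alloc : mdsz)) == NULL)
		return NULL;

	/* K1 = HASH(K || H || c || session_id) */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, K, Klen);
	SHA256_Update(&ctx, H, Hlen);
	SHA256_Update(&ctx, &c, 1);
	SHA256_Update(&ctx, sid, sidlen);
	SHA256_Final(digest, &ctx);

	/* Kn = HASH(K || H || K1 || ... || Kn-1) until 'need' bytes exist */
	for (have = mdsz; need > have; have += mdsz) {
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, K, Klen);
		SHA256_Update(&ctx, H, Hlen);
		SHA256_Update(&ctx, digest, have);
		SHA256_Final(digest + have, &ctx);
	}
	return digest;	/* caller uses the first 'need' bytes, then frees */
}
/* In kex_derive_keys() above, c runs from 'A' to 'F' to produce the two IVs,
 * two encryption keys and two MAC keys for the client->server and
 * server->client directions. */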
./CrossVul/dataset_final_sorted/CWE-399/c/bad_5367_0
crossvul-cpp_data_good_2376_0
/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors: * * Martin Hundebøll <martin@hundeboll.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "main.h" #include "fragmentation.h" #include "send.h" #include "originator.h" #include "routing.h" #include "hard-interface.h" #include "soft-interface.h" /** * batadv_frag_clear_chain - delete entries in the fragment buffer chain * @head: head of chain with entries. * * Free fragments in the passed hlist. Should be called with appropriate lock. */ static void batadv_frag_clear_chain(struct hlist_head *head) { struct batadv_frag_list_entry *entry; struct hlist_node *node; hlist_for_each_entry_safe(entry, node, head, list) { hlist_del(&entry->list); kfree_skb(entry->skb); kfree(entry); } } /** * batadv_frag_purge_orig - free fragments associated to an orig * @orig_node: originator to free fragments from * @check_cb: optional function to tell if an entry should be purged */ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node, bool (*check_cb)(struct batadv_frag_table_entry *)) { struct batadv_frag_table_entry *chain; uint8_t i; for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) { chain = &orig_node->fragments[i]; spin_lock_bh(&orig_node->fragments[i].lock); if (!check_cb || check_cb(chain)) { batadv_frag_clear_chain(&orig_node->fragments[i].head); orig_node->fragments[i].size = 0; } spin_unlock_bh(&orig_node->fragments[i].lock); } } /** * batadv_frag_size_limit - maximum possible size of packet to be fragmented * * Returns the maximum size of payload that can be fragmented. */ static int batadv_frag_size_limit(void) { int limit = BATADV_FRAG_MAX_FRAG_SIZE; limit -= sizeof(struct batadv_frag_packet); limit *= BATADV_FRAG_MAX_FRAGMENTS; return limit; } /** * batadv_frag_init_chain - check and prepare fragment chain for new fragment * @chain: chain in fragments table to init * @seqno: sequence number of the received fragment * * Make chain ready for a fragment with sequence number "seqno". Delete existing * entries if they have an "old" sequence number. * * Caller must hold chain->lock. * * Returns true if chain is empty and caller can just insert the new fragment * without searching for the right position. */ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain, uint16_t seqno) { if (chain->seqno == seqno) return false; if (!hlist_empty(&chain->head)) batadv_frag_clear_chain(&chain->head); chain->size = 0; chain->seqno = seqno; return true; } /** * batadv_frag_insert_packet - insert a fragment into a fragment chain * @orig_node: originator that the fragment was received from * @skb: skb to insert * @chain_out: list head to attach complete chains of fragments to * * Insert a new fragment into the reverse ordered chain in the right table * entry. The hash table entry is cleared if "old" fragments exist in it. * * Returns true if skb is buffered, false on error. 
If the chain has all the * fragments needed to merge the packet, the chain is moved to the passed head * to avoid locking the chain in the table. */ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, struct sk_buff *skb, struct hlist_head *chain_out) { struct batadv_frag_table_entry *chain; struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr; struct batadv_frag_list_entry *frag_entry_last = NULL; struct batadv_frag_packet *frag_packet; uint8_t bucket; uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet); bool ret = false; /* Linearize packet to avoid linearizing 16 packets in a row when doing * the later merge. Non-linear merge should be added to remove this * linearization. */ if (skb_linearize(skb) < 0) goto err; frag_packet = (struct batadv_frag_packet *)skb->data; seqno = ntohs(frag_packet->seqno); bucket = seqno % BATADV_FRAG_BUFFER_COUNT; frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC); if (!frag_entry_new) goto err; frag_entry_new->skb = skb; frag_entry_new->no = frag_packet->no; /* Select entry in the "chain table" and delete any prior fragments * with another sequence number. batadv_frag_init_chain() returns true, * if the list is empty at return. */ chain = &orig_node->fragments[bucket]; spin_lock_bh(&chain->lock); if (batadv_frag_init_chain(chain, seqno)) { hlist_add_head(&frag_entry_new->list, &chain->head); chain->size = skb->len - hdr_size; chain->timestamp = jiffies; ret = true; goto out; } /* Find the position for the new fragment. */ hlist_for_each_entry(frag_entry_curr, &chain->head, list) { /* Drop packet if fragment already exists. */ if (frag_entry_curr->no == frag_entry_new->no) goto err_unlock; /* Order fragments from highest to lowest. */ if (frag_entry_curr->no < frag_entry_new->no) { hlist_add_before(&frag_entry_new->list, &frag_entry_curr->list); chain->size += skb->len - hdr_size; chain->timestamp = jiffies; ret = true; goto out; } /* store current entry because it could be the last in list */ frag_entry_last = frag_entry_curr; } /* Reached the end of the list, so insert after 'frag_entry_last'. */ if (likely(frag_entry_last)) { hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list); chain->size += skb->len - hdr_size; chain->timestamp = jiffies; ret = true; } out: if (chain->size > batadv_frag_size_limit() || ntohs(frag_packet->total_size) > batadv_frag_size_limit()) { /* Clear chain if total size of either the list or the packet * exceeds the maximum size of one merged packet. */ batadv_frag_clear_chain(&chain->head); chain->size = 0; } else if (ntohs(frag_packet->total_size) == chain->size) { /* All fragments received. Hand over chain to caller. */ hlist_move_list(&chain->head, chain_out); chain->size = 0; } err_unlock: spin_unlock_bh(&chain->lock); err: if (!ret) kfree(frag_entry_new); return ret; } /** * batadv_frag_merge_packets - merge a chain of fragments * @chain: head of chain with fragments * @skb: packet with total size of skb after merging * * Expand the first skb in the chain and copy the content of the remaining * skb's into the expanded one. After doing so, clear the chain. * * Returns the merged skb or NULL on error. */ static struct sk_buff * batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) { struct batadv_frag_packet *packet; struct batadv_frag_list_entry *entry; struct sk_buff *skb_out = NULL; int size, hdr_size = sizeof(struct batadv_frag_packet); /* Make sure incoming skb has non-bogus data. 
*/ packet = (struct batadv_frag_packet *)skb->data; size = ntohs(packet->total_size); if (size > batadv_frag_size_limit()) goto free; /* Remove first entry, as this is the destination for the rest of the * fragments. */ entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list); hlist_del(&entry->list); skb_out = entry->skb; kfree(entry); /* Make room for the rest of the fragments. */ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { kfree_skb(skb_out); skb_out = NULL; goto free; } /* Move the existing MAC header to just before the payload. (Override * the fragment header.) */ skb_pull_rcsum(skb_out, hdr_size); memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); skb_set_mac_header(skb_out, -ETH_HLEN); skb_reset_network_header(skb_out); skb_reset_transport_header(skb_out); /* Copy the payload of the each fragment into the last skb */ hlist_for_each_entry(entry, chain, list) { size = entry->skb->len - hdr_size; memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size, size); } free: /* Locking is not needed, because 'chain' is not part of any orig. */ batadv_frag_clear_chain(chain); return skb_out; } /** * batadv_frag_skb_buffer - buffer fragment for later merge * @skb: skb to buffer * @orig_node_src: originator that the skb is received from * * Add fragment to buffer and merge fragments if possible. * * There are three possible outcomes: 1) Packet is merged: Return true and * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb * to NULL; 3) Error: Return false and leave skb as is. */ bool batadv_frag_skb_buffer(struct sk_buff **skb, struct batadv_orig_node *orig_node_src) { struct sk_buff *skb_out = NULL; struct hlist_head head = HLIST_HEAD_INIT; bool ret = false; /* Add packet to buffer and table entry if merge is possible. */ if (!batadv_frag_insert_packet(orig_node_src, *skb, &head)) goto out_err; /* Leave if more fragments are needed to merge. */ if (hlist_empty(&head)) goto out; skb_out = batadv_frag_merge_packets(&head, *skb); if (!skb_out) goto out_err; out: *skb = skb_out; ret = true; out_err: return ret; } /** * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged * @skb: skb to forward * @recv_if: interface that the skb is received on * @orig_node_src: originator that the skb is received from * * Look up the next-hop of the fragments payload and check if the merged packet * will exceed the MTU towards the next-hop. If so, the fragment is forwarded * without merging it. * * Returns true if the fragment is consumed/forwarded, false otherwise. */ bool batadv_frag_skb_fwd(struct sk_buff *skb, struct batadv_hard_iface *recv_if, struct batadv_orig_node *orig_node_src) { struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct batadv_orig_node *orig_node_dst = NULL; struct batadv_neigh_node *neigh_node = NULL; struct batadv_frag_packet *packet; uint16_t total_size; bool ret = false; packet = (struct batadv_frag_packet *)skb->data; orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest); if (!orig_node_dst) goto out; neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if); if (!neigh_node) goto out; /* Forward the fragment, if the merged packet would be too big to * be assembled. 
*/ total_size = ntohs(packet->total_size); if (total_size > neigh_node->if_incoming->net_dev->mtu) { batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD); batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES, skb->len + ETH_HLEN); packet->ttl--; batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; } out: if (orig_node_dst) batadv_orig_node_free_ref(orig_node_dst); if (neigh_node) batadv_neigh_node_free_ref(neigh_node); return ret; } /** * batadv_frag_create - create a fragment from skb * @skb: skb to create fragment from * @frag_head: header to use in new fragment * @mtu: size of new fragment * * Split the passed skb into two fragments: A new one with size matching the * passed mtu and the old one with the rest. The new skb contains data from the * tail of the old skb. * * Returns the new fragment, NULL on error. */ static struct sk_buff *batadv_frag_create(struct sk_buff *skb, struct batadv_frag_packet *frag_head, unsigned int mtu) { struct sk_buff *skb_fragment; unsigned header_size = sizeof(*frag_head); unsigned fragment_size = mtu - header_size; skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); if (!skb_fragment) goto err; skb->priority = TC_PRIO_CONTROL; /* Eat the last mtu-bytes of the skb */ skb_reserve(skb_fragment, header_size + ETH_HLEN); skb_split(skb, skb_fragment, skb->len - fragment_size); /* Add the header */ skb_push(skb_fragment, header_size); memcpy(skb_fragment->data, frag_head, header_size); err: return skb_fragment; } /** * batadv_frag_send_packet - create up to 16 fragments from the passed skb * @skb: skb to create fragments from * @orig_node: final destination of the created fragments * @neigh_node: next-hop of the created fragments * * Returns true on success, false otherwise. */ bool batadv_frag_send_packet(struct sk_buff *skb, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node) { struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_frag_packet frag_header; struct sk_buff *skb_fragment; unsigned mtu = neigh_node->if_incoming->net_dev->mtu; unsigned header_size = sizeof(frag_header); unsigned max_fragment_size, max_packet_size; bool ret = false; /* To avoid merge and refragmentation at next-hops we never send * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE */ mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE); max_fragment_size = (mtu - header_size - ETH_HLEN); max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; /* Don't even try to fragment, if we need more than 16 fragments */ if (skb->len > max_packet_size) goto out_err; bat_priv = orig_node->bat_priv; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out_err; /* Create one header to be copied to all fragments */ frag_header.packet_type = BATADV_UNICAST_FRAG; frag_header.version = BATADV_COMPAT_VERSION; frag_header.ttl = BATADV_TTL; frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); frag_header.reserved = 0; frag_header.no = 0; frag_header.total_size = htons(skb->len); ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); ether_addr_copy(frag_header.dest, orig_node->orig); /* Eat and send fragments from the tail of skb */ while (skb->len > max_fragment_size) { skb_fragment = batadv_frag_create(skb, &frag_header, mtu); if (!skb_fragment) goto out_err; batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES, skb_fragment->len + ETH_HLEN); batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming, 
neigh_node->addr); frag_header.no++; /* The initial check in this function should cover this case */ if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) goto out_err; } /* Make room for the fragment header. */ if (batadv_skb_head_push(skb, header_size) < 0 || pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) goto out_err; memcpy(skb->data, &frag_header, header_size); /* Send the last fragment */ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES, skb->len + ETH_HLEN); batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; out_err: if (primary_if) batadv_hardif_free_ref(primary_if); return ret; }
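/*
 * Editor's addition (illustrative, not part of fragmentation.c): a small
 * user-space sketch of the fragment-count arithmetic that
 * batadv_frag_send_packet() above performs before splitting an skb. The
 * constants mirror the kernel values as the editor understands them
 * (BATADV_FRAG_MAX_FRAG_SIZE = 1400, BATADV_FRAG_MAX_FRAGMENTS = 16,
 * ETH_HLEN = 14, 20-byte struct batadv_frag_packet); treat the exact
 * numbers and the helper name as assumptions for this example only.
 */
#include <stdio.h>

#define FRAG_MAX_FRAG_SIZE 1400	/* largest fragment batman-adv will emit */
#define FRAG_MAX_FRAGMENTS 16	/* at most 16 fragments per merged packet */
#define FRAG_HDR_SIZE 20	/* sizeof(struct batadv_frag_packet), assumed */
#define ETH_HDR_LEN 14		/* Ethernet header prepended per fragment */

/* Return how many fragments a payload of 'len' bytes needs over 'mtu',
 * or 0 if it cannot be sent (more than FRAG_MAX_FRAGMENTS needed). */
static unsigned int fragments_needed(unsigned int len, unsigned int mtu)
{
	unsigned int max_fragment_size, max_packet_size;

	/* Never emit fragments larger than FRAG_MAX_FRAG_SIZE, so that
	 * intermediate hops do not have to merge and re-fragment. */
	if (mtu > FRAG_MAX_FRAG_SIZE)
		mtu = FRAG_MAX_FRAG_SIZE;
	if (mtu <= FRAG_HDR_SIZE + ETH_HDR_LEN)
		return 0;

	max_fragment_size = mtu - FRAG_HDR_SIZE - ETH_HDR_LEN;
	max_packet_size = max_fragment_size * FRAG_MAX_FRAGMENTS;
	if (len > max_packet_size)
		return 0;

	/* ceil(len / max_fragment_size), matching the tail-eating send loop */
	return (len + max_fragment_size - 1) / max_fragment_size;
}

int main(void)
{
	/* e.g. a 3000-byte payload over a 1500-byte MTU link -> 3 fragments */
	printf("%u fragments\n", fragments_needed(3000, 1500));
	return 0;
}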
./CrossVul/dataset_final_sorted/CWE-399/c/good_2376_0
crossvul-cpp_data_good_3434_0
/* * threads.c: Thread support internal calls * * Author: * Dick Porter (dick@ximian.com) * Paolo Molaro (lupus@ximian.com) * Patrik Torstensson (patrik.torstensson@labs2.com) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) */ #include <config.h> #include <glib.h> #include <signal.h> #include <string.h> #if defined(__OpenBSD__) #include <pthread.h> #include <pthread_np.h> #endif #include <mono/metadata/object.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/threads.h> #include <mono/metadata/threadpool.h> #include <mono/metadata/threads-types.h> #include <mono/metadata/exception.h> #include <mono/metadata/environment.h> #include <mono/metadata/monitor.h> #include <mono/metadata/gc-internal.h> #include <mono/metadata/marshal.h> #include <mono/io-layer/io-layer.h> #ifndef HOST_WIN32 #include <mono/io-layer/threads.h> #endif #include <mono/metadata/object-internals.h> #include <mono/metadata/mono-debug-debugger.h> #include <mono/utils/mono-compiler.h> #include <mono/utils/mono-mmap.h> #include <mono/utils/mono-membar.h> #include <mono/utils/mono-time.h> #include <mono/utils/hazard-pointer.h> #include <mono/metadata/gc-internal.h> #ifdef PLATFORM_ANDROID #include <errno.h> extern int tkill (pid_t tid, int signal); #endif /*#define THREAD_DEBUG(a) do { a; } while (0)*/ #define THREAD_DEBUG(a) /*#define THREAD_WAIT_DEBUG(a) do { a; } while (0)*/ #define THREAD_WAIT_DEBUG(a) /*#define LIBGC_DEBUG(a) do { a; } while (0)*/ #define LIBGC_DEBUG(a) #define SPIN_TRYLOCK(i) (InterlockedCompareExchange (&(i), 1, 0) == 0) #define SPIN_LOCK(i) do { \ if (SPIN_TRYLOCK (i)) \ break; \ } while (1) #define SPIN_UNLOCK(i) i = 0 /* Provide this for systems with glib < 2.6 */ #ifndef G_GSIZE_FORMAT # if GLIB_SIZEOF_LONG == 8 # define G_GSIZE_FORMAT "lu" # else # define G_GSIZE_FORMAT "u" # endif #endif struct StartInfo { guint32 (*func)(void *); MonoThread *obj; MonoObject *delegate; void *start_arg; }; typedef union { gint32 ival; gfloat fval; } IntFloatUnion; typedef union { gint64 ival; gdouble fval; } LongDoubleUnion; typedef struct _MonoThreadDomainTls MonoThreadDomainTls; struct _MonoThreadDomainTls { MonoThreadDomainTls *next; guint32 offset; guint32 size; }; typedef struct { int idx; int offset; MonoThreadDomainTls *freelist; } StaticDataInfo; /* Number of cached culture objects in the MonoThread->cached_culture_info array * (per-type): we use the first NUM entries for CultureInfo and the last for * UICultureInfo. So the size of the array is really NUM_CACHED_CULTURES * 2. 
*/ #define NUM_CACHED_CULTURES 4 #define CULTURES_START_IDX 0 #define UICULTURES_START_IDX NUM_CACHED_CULTURES /* Controls access to the 'threads' hash table */ #define mono_threads_lock() EnterCriticalSection (&threads_mutex) #define mono_threads_unlock() LeaveCriticalSection (&threads_mutex) static CRITICAL_SECTION threads_mutex; /* Controls access to context static data */ #define mono_contexts_lock() EnterCriticalSection (&contexts_mutex) #define mono_contexts_unlock() LeaveCriticalSection (&contexts_mutex) static CRITICAL_SECTION contexts_mutex; /* Holds current status of static data heap */ static StaticDataInfo thread_static_info; static StaticDataInfo context_static_info; /* The hash of existing threads (key is thread ID, value is * MonoInternalThread*) that need joining before exit */ static MonoGHashTable *threads=NULL; /* * Threads which are starting up and they are not in the 'threads' hash yet. * When handle_store is called for a thread, it will be removed from this hash table. * Protected by mono_threads_lock (). */ static MonoGHashTable *threads_starting_up = NULL; /* Maps a MonoThread to its start argument */ /* Protected by mono_threads_lock () */ static MonoGHashTable *thread_start_args = NULL; /* The TLS key that holds the MonoObject assigned to each thread */ static guint32 current_object_key = -1; #ifdef MONO_HAVE_FAST_TLS /* we need to use both the Tls* functions and __thread because * the gc needs to see all the threads */ MONO_FAST_TLS_DECLARE(tls_current_object); #define SET_CURRENT_OBJECT(x) do { \ MONO_FAST_TLS_SET (tls_current_object, x); \ TlsSetValue (current_object_key, x); \ } while (FALSE) #define GET_CURRENT_OBJECT() ((MonoInternalThread*) MONO_FAST_TLS_GET (tls_current_object)) #else #define SET_CURRENT_OBJECT(x) TlsSetValue (current_object_key, x) #define GET_CURRENT_OBJECT() (MonoInternalThread*) TlsGetValue (current_object_key) #endif /* function called at thread start */ static MonoThreadStartCB mono_thread_start_cb = NULL; /* function called at thread attach */ static MonoThreadAttachCB mono_thread_attach_cb = NULL; /* function called at thread cleanup */ static MonoThreadCleanupFunc mono_thread_cleanup_fn = NULL; /* function called to notify the runtime about a pending exception on the current thread */ static MonoThreadNotifyPendingExcFunc mono_thread_notify_pending_exc_fn = NULL; /* The default stack size for each thread */ static guint32 default_stacksize = 0; #define default_stacksize_for_thread(thread) ((thread)->stack_size? 
(thread)->stack_size: default_stacksize) static void thread_adjust_static_data (MonoInternalThread *thread); static void mono_free_static_data (gpointer* static_data, gboolean threadlocal); static void mono_init_static_data_info (StaticDataInfo *static_data); static guint32 mono_alloc_static_data_slot (StaticDataInfo *static_data, guint32 size, guint32 align); static gboolean mono_thread_resume (MonoInternalThread* thread); static void mono_thread_start (MonoThread *thread); static void signal_thread_state_change (MonoInternalThread *thread); static MonoException* mono_thread_execute_interruption (MonoInternalThread *thread); static void ref_stack_destroy (gpointer rs); /* Spin lock for InterlockedXXX 64 bit functions */ #define mono_interlocked_lock() EnterCriticalSection (&interlocked_mutex) #define mono_interlocked_unlock() LeaveCriticalSection (&interlocked_mutex) static CRITICAL_SECTION interlocked_mutex; /* global count of thread interruptions requested */ static gint32 thread_interruption_requested = 0; /* Event signaled when a thread changes its background mode */ static HANDLE background_change_event; static gboolean shutting_down = FALSE; guint32 mono_thread_get_tls_key (void) { return current_object_key; } gint32 mono_thread_get_tls_offset (void) { int offset; MONO_THREAD_VAR_OFFSET (tls_current_object,offset); return offset; } /* handle_store() and handle_remove() manage the array of threads that * still need to be waited for when the main thread exits. * * If handle_store() returns FALSE the thread must not be started * because Mono is shutting down. */ static gboolean handle_store(MonoThread *thread) { mono_threads_lock (); THREAD_DEBUG (g_message ("%s: thread %p ID %"G_GSIZE_FORMAT, __func__, thread, (gsize)thread->internal_thread->tid)); if (threads_starting_up) mono_g_hash_table_remove (threads_starting_up, thread); if (shutting_down) { mono_threads_unlock (); return FALSE; } if(threads==NULL) { MONO_GC_REGISTER_ROOT_FIXED (threads); threads=mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC); } /* We don't need to duplicate thread->handle, because it is * only closed when the thread object is finalized by the GC. */ g_assert (thread->internal_thread); mono_g_hash_table_insert(threads, (gpointer)(gsize)(thread->internal_thread->tid), thread->internal_thread); mono_threads_unlock (); return TRUE; } static gboolean handle_remove(MonoInternalThread *thread) { gboolean ret; gsize tid = thread->tid; THREAD_DEBUG (g_message ("%s: thread ID %"G_GSIZE_FORMAT, __func__, tid)); mono_threads_lock (); if (threads) { /* We have to check whether the thread object for the * tid is still the same in the table because the * thread might have been destroyed and the tid reused * in the meantime, in which case the tid would be in * the table, but with another thread object. */ if (mono_g_hash_table_lookup (threads, (gpointer)tid) == thread) { mono_g_hash_table_remove (threads, (gpointer)tid); ret = TRUE; } else { ret = FALSE; } } else ret = FALSE; mono_threads_unlock (); /* Don't close the handle here, wait for the object finalizer * to do it. Otherwise, the following race condition applies: * * 1) Thread exits (and handle_remove() closes the handle) * * 2) Some other handle is reassigned the same slot * * 3) Another thread tries to join the first thread, and * blocks waiting for the reassigned handle to be signalled * (which might never happen). This is possible, because the * thread calling Join() still has a reference to the first * thread's object. 
*/ return ret; } static void ensure_synch_cs_set (MonoInternalThread *thread) { CRITICAL_SECTION *synch_cs; if (thread->synch_cs != NULL) { return; } synch_cs = g_new0 (CRITICAL_SECTION, 1); InitializeCriticalSection (synch_cs); if (InterlockedCompareExchangePointer ((gpointer *)&thread->synch_cs, synch_cs, NULL) != NULL) { /* Another thread must have installed this CS */ DeleteCriticalSection (synch_cs); g_free (synch_cs); } } /* * NOTE: this function can be called also for threads different from the current one: * make sure no code called from it will ever assume it is run on the thread that is * getting cleaned up. */ static void thread_cleanup (MonoInternalThread *thread) { g_assert (thread != NULL); if (thread->abort_state_handle) { mono_gchandle_free (thread->abort_state_handle); thread->abort_state_handle = 0; } thread->abort_exc = NULL; thread->current_appcontext = NULL; /* * This is necessary because otherwise we might have * cross-domain references which will not get cleaned up when * the target domain is unloaded. */ if (thread->cached_culture_info) { int i; for (i = 0; i < NUM_CACHED_CULTURES * 2; ++i) mono_array_set (thread->cached_culture_info, MonoObject*, i, NULL); } /* if the thread is not in the hash it has been removed already */ if (!handle_remove (thread)) { /* This needs to be called even if handle_remove () fails */ if (mono_thread_cleanup_fn) mono_thread_cleanup_fn (thread); return; } mono_release_type_locks (thread); EnterCriticalSection (thread->synch_cs); thread->state |= ThreadState_Stopped; thread->state &= ~ThreadState_Background; LeaveCriticalSection (thread->synch_cs); mono_profiler_thread_end (thread->tid); if (thread == mono_thread_internal_current ()) mono_thread_pop_appdomain_ref (); thread->cached_culture_info = NULL; mono_free_static_data (thread->static_data, TRUE); thread->static_data = NULL; ref_stack_destroy (thread->appdomain_refs); thread->appdomain_refs = NULL; if (mono_thread_cleanup_fn) mono_thread_cleanup_fn (thread); mono_thread_small_id_free (thread->small_id); MONO_GC_UNREGISTER_ROOT (thread->thread_pinning_ref); thread->small_id = -2; } static gpointer get_thread_static_data (MonoInternalThread *thread, guint32 offset) { int idx; g_assert ((offset & 0x80000000) == 0); offset &= 0x7fffffff; idx = (offset >> 24) - 1; return ((char*) thread->static_data [idx]) + (offset & 0xffffff); } static MonoThread** get_current_thread_ptr_for_domain (MonoDomain *domain, MonoInternalThread *thread) { static MonoClassField *current_thread_field = NULL; guint32 offset; if (!current_thread_field) { current_thread_field = mono_class_get_field_from_name (mono_defaults.thread_class, "current_thread"); g_assert (current_thread_field); } mono_class_vtable (domain, mono_defaults.thread_class); mono_domain_lock (domain); offset = GPOINTER_TO_UINT (g_hash_table_lookup (domain->special_static_fields, current_thread_field)); mono_domain_unlock (domain); g_assert (offset); return get_thread_static_data (thread, offset); } static void set_current_thread_for_domain (MonoDomain *domain, MonoInternalThread *thread, MonoThread *current) { MonoThread **current_thread_ptr = get_current_thread_ptr_for_domain (domain, thread); g_assert (current->obj.vtable->domain == domain); g_assert (!*current_thread_ptr); *current_thread_ptr = current; } static MonoInternalThread* create_internal_thread_object (void) { MonoVTable *vt = mono_class_vtable (mono_get_root_domain (), mono_defaults.internal_thread_class); return (MonoInternalThread*)mono_gc_alloc_mature (vt); } static MonoThread* 
create_thread_object (MonoDomain *domain) { MonoVTable *vt = mono_class_vtable (domain, mono_defaults.thread_class); return (MonoThread*)mono_gc_alloc_mature (vt); } static MonoThread* new_thread_with_internal (MonoDomain *domain, MonoInternalThread *internal) { MonoThread *thread = create_thread_object (domain); MONO_OBJECT_SETREF (thread, internal_thread, internal); return thread; } static void init_root_domain_thread (MonoInternalThread *thread, MonoThread *candidate) { MonoDomain *domain = mono_get_root_domain (); if (!candidate || candidate->obj.vtable->domain != domain) candidate = new_thread_with_internal (domain, thread); set_current_thread_for_domain (domain, thread, candidate); g_assert (!thread->root_domain_thread); MONO_OBJECT_SETREF (thread, root_domain_thread, candidate); } static guint32 WINAPI start_wrapper_internal(void *data) { struct StartInfo *start_info=(struct StartInfo *)data; guint32 (*start_func)(void *); void *start_arg; gsize tid; /* * We don't create a local to hold start_info->obj, so hopefully it won't get pinned during a * GC stack walk. */ MonoInternalThread *internal = start_info->obj->internal_thread; MonoObject *start_delegate = start_info->delegate; MonoDomain *domain = start_info->obj->obj.vtable->domain; THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Start wrapper", __func__, GetCurrentThreadId ())); /* We can be sure start_info->obj->tid and * start_info->obj->handle have been set, because the thread * was created suspended, and these values were set before the * thread resumed */ tid=internal->tid; SET_CURRENT_OBJECT (internal); mono_monitor_init_tls (); /* Every thread references the appdomain which created it */ mono_thread_push_appdomain_ref (domain); if (!mono_domain_set (domain, FALSE)) { /* No point in raising an appdomain_unloaded exception here */ /* FIXME: Cleanup here */ mono_thread_pop_appdomain_ref (); return 0; } start_func = start_info->func; start_arg = start_info->start_arg; /* We have to do this here because mono_thread_new_init() requires that root_domain_thread is set up. */ thread_adjust_static_data (internal); init_root_domain_thread (internal, start_info->obj); /* This MUST be called before any managed code can be * executed, as it calls the callback function that (for the * jit) sets the lmf marker. */ mono_thread_new_init (tid, &tid, start_func); internal->stack_ptr = &tid; LIBGC_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT",%d) Setting thread stack to %p", __func__, GetCurrentThreadId (), getpid (), thread->stack_ptr)); THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Setting current_object_key to %p", __func__, GetCurrentThreadId (), internal)); /* On 2.0 profile (and higher), set explicitly since state might have been Unknown */ if (internal->apartment_state == ThreadApartmentState_Unknown) internal->apartment_state = ThreadApartmentState_MTA; mono_thread_init_apartment_state (); if(internal->start_notify!=NULL) { /* Let the thread that called Start() know we're * ready */ ReleaseSemaphore (internal->start_notify, 1, NULL); } mono_threads_lock (); mono_g_hash_table_remove (thread_start_args, start_info->obj); mono_threads_unlock (); mono_thread_set_execution_context (start_info->obj->ec_to_set); start_info->obj->ec_to_set = NULL; g_free (start_info); THREAD_DEBUG (g_message ("%s: start_wrapper for %"G_GSIZE_FORMAT, __func__, internal->tid)); /* * Call this after calling start_notify, since the profiler callback might want * to lock the thread, and the lock is held by thread_start () which waits for * start_notify. 
*/ mono_profiler_thread_start (tid); /* start_func is set only for unmanaged start functions */ if (start_func) { start_func (start_arg); } else { void *args [1]; g_assert (start_delegate != NULL); args [0] = start_arg; /* we may want to handle the exception here. See comment below on unhandled exceptions */ mono_runtime_delegate_invoke (start_delegate, args, NULL); } /* If the thread calls ExitThread at all, this remaining code * will not be executed, but the main thread will eventually * call thread_cleanup() on this thread's behalf. */ THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Start wrapper terminating", __func__, GetCurrentThreadId ())); thread_cleanup (internal); /* Do any cleanup needed for apartment state. This * cannot be done in thread_cleanup since thread_cleanup could be * called for a thread other than the current thread. * mono_thread_cleanup_apartment_state cleans up apartment * for the current thead */ mono_thread_cleanup_apartment_state (); /* Remove the reference to the thread object in the TLS data, * so the thread object can be finalized. This won't be * reached if the thread threw an uncaught exception, so those * thread handles will stay referenced :-( (This is due to * missing support for scanning thread-specific data in the * Boehm GC - the io-layer keeps a GC-visible hash of pointers * to TLS data.) */ SET_CURRENT_OBJECT (NULL); mono_domain_unset (); return(0); } static guint32 WINAPI start_wrapper(void *data) { #ifdef HAVE_SGEN_GC volatile int dummy; /* Avoid scanning the frames above this frame during a GC */ mono_gc_set_stack_end ((void*)&dummy); #endif return start_wrapper_internal (data); } void mono_thread_new_init (intptr_t tid, gpointer stack_start, gpointer func) { if (mono_thread_start_cb) { mono_thread_start_cb (tid, stack_start, func); } } void mono_threads_set_default_stacksize (guint32 stacksize) { default_stacksize = stacksize; } guint32 mono_threads_get_default_stacksize (void) { return default_stacksize; } /* * mono_create_thread: * * This is a wrapper around CreateThread which handles differences in the type of * the the 'tid' argument. */ gpointer mono_create_thread (WapiSecurityAttributes *security, guint32 stacksize, WapiThreadStart start, gpointer param, guint32 create, gsize *tid) { gpointer res; #ifdef HOST_WIN32 DWORD real_tid; res = CreateThread (security, stacksize, start, param, create, &real_tid); if (tid) *tid = real_tid; #else res = CreateThread (security, stacksize, start, param, create, tid); #endif return res; } /* * The thread start argument may be an object reference, and there is * no ref to keep it alive when the new thread is started but not yet * registered with the collector. So we store it in a GC tracked hash * table. * * LOCKING: Assumes the threads lock is held. 
*/ static void register_thread_start_argument (MonoThread *thread, struct StartInfo *start_info) { if (thread_start_args == NULL) { MONO_GC_REGISTER_ROOT_FIXED (thread_start_args); thread_start_args = mono_g_hash_table_new (NULL, NULL); } mono_g_hash_table_insert (thread_start_args, thread, start_info->start_arg); } MonoInternalThread* mono_thread_create_internal (MonoDomain *domain, gpointer func, gpointer arg, gboolean threadpool_thread, guint32 stack_size) { MonoThread *thread; MonoInternalThread *internal; HANDLE thread_handle; struct StartInfo *start_info; gsize tid; thread = create_thread_object (domain); internal = create_internal_thread_object (); MONO_OBJECT_SETREF (thread, internal_thread, internal); start_info=g_new0 (struct StartInfo, 1); start_info->func = func; start_info->obj = thread; start_info->start_arg = arg; mono_threads_lock (); if (shutting_down) { mono_threads_unlock (); g_free (start_info); return NULL; } if (threads_starting_up == NULL) { MONO_GC_REGISTER_ROOT_FIXED (threads_starting_up); threads_starting_up = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_VALUE_GC); } register_thread_start_argument (thread, start_info); mono_g_hash_table_insert (threads_starting_up, thread, thread); mono_threads_unlock (); if (stack_size == 0) stack_size = default_stacksize_for_thread (internal); /* Create suspended, so we can do some housekeeping before the thread * starts */ thread_handle = mono_create_thread (NULL, stack_size, (LPTHREAD_START_ROUTINE)start_wrapper, start_info, CREATE_SUSPENDED, &tid); THREAD_DEBUG (g_message ("%s: Started thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread_handle)); if (thread_handle == NULL) { /* The thread couldn't be created, so throw an exception */ mono_threads_lock (); mono_g_hash_table_remove (threads_starting_up, thread); mono_threads_unlock (); g_free (start_info); mono_raise_exception (mono_get_exception_execution_engine ("Couldn't create thread")); return NULL; } internal->handle=thread_handle; internal->tid=tid; internal->apartment_state=ThreadApartmentState_Unknown; internal->small_id = mono_thread_small_id_alloc (); internal->thread_pinning_ref = internal; MONO_GC_REGISTER_ROOT (internal->thread_pinning_ref); internal->synch_cs = g_new0 (CRITICAL_SECTION, 1); InitializeCriticalSection (internal->synch_cs); internal->threadpool_thread = threadpool_thread; if (threadpool_thread) mono_thread_set_state (internal, ThreadState_Background); if (handle_store (thread)) ResumeThread (thread_handle); return internal; } void mono_thread_create (MonoDomain *domain, gpointer func, gpointer arg) { mono_thread_create_internal (domain, func, arg, FALSE, 0); } /* * mono_thread_get_stack_bounds: * * Return the address and size of the current threads stack. Return NULL as the * stack address if the stack address cannot be determined. 
*/ void mono_thread_get_stack_bounds (guint8 **staddr, size_t *stsize) { #if defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP) *staddr = (guint8*)pthread_get_stackaddr_np (pthread_self ()); *stsize = pthread_get_stacksize_np (pthread_self ()); *staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1)); return; /* FIXME: simplify the mess below */ #elif !defined(HOST_WIN32) pthread_attr_t attr; guint8 *current = (guint8*)&attr; pthread_attr_init (&attr); # ifdef HAVE_PTHREAD_GETATTR_NP pthread_getattr_np (pthread_self(), &attr); # else # ifdef HAVE_PTHREAD_ATTR_GET_NP pthread_attr_get_np (pthread_self(), &attr); # elif defined(sun) *staddr = NULL; pthread_attr_getstacksize (&attr, &stsize); # elif defined(__OpenBSD__) stack_t ss; int rslt; rslt = pthread_stackseg_np(pthread_self(), &ss); g_assert (rslt == 0); *staddr = (guint8*)((size_t)ss.ss_sp - ss.ss_size); *stsize = ss.ss_size; # else *staddr = NULL; *stsize = 0; return; # endif # endif # if !defined(sun) # if !defined(__OpenBSD__) pthread_attr_getstack (&attr, (void**)staddr, stsize); # endif if (*staddr) g_assert ((current > *staddr) && (current < *staddr + *stsize)); # endif pthread_attr_destroy (&attr); #else *staddr = NULL; *stsize = (size_t)-1; #endif /* When running under emacs, sometimes staddr is not aligned to a page size */ *staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1)); } MonoThread * mono_thread_attach (MonoDomain *domain) { MonoInternalThread *thread; MonoThread *current_thread; HANDLE thread_handle; gsize tid; if ((thread = mono_thread_internal_current ())) { if (domain != mono_domain_get ()) mono_domain_set (domain, TRUE); /* Already attached */ return mono_thread_current (); } if (!mono_gc_register_thread (&domain)) { g_error ("Thread %"G_GSIZE_FORMAT" calling into managed code is not registered with the GC. On UNIX, this can be fixed by #include-ing <gc.h> before <pthread.h> in the file containing the thread creation code.", GetCurrentThreadId ()); } thread = create_internal_thread_object (); thread_handle = GetCurrentThread (); g_assert (thread_handle); tid=GetCurrentThreadId (); /* * The handle returned by GetCurrentThread () is a pseudo handle, so it can't be used to * refer to the thread from other threads for things like aborting. 
*/ DuplicateHandle (GetCurrentProcess (), thread_handle, GetCurrentProcess (), &thread_handle, THREAD_ALL_ACCESS, TRUE, 0); thread->handle=thread_handle; thread->tid=tid; #ifdef PLATFORM_ANDROID thread->android_tid = (gpointer) gettid (); #endif thread->apartment_state=ThreadApartmentState_Unknown; thread->small_id = mono_thread_small_id_alloc (); thread->thread_pinning_ref = thread; MONO_GC_REGISTER_ROOT (thread->thread_pinning_ref); thread->stack_ptr = &tid; thread->synch_cs = g_new0 (CRITICAL_SECTION, 1); InitializeCriticalSection (thread->synch_cs); THREAD_DEBUG (g_message ("%s: Attached thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread_handle)); current_thread = new_thread_with_internal (domain, thread); if (!handle_store (current_thread)) { /* Mono is shutting down, so just wait for the end */ for (;;) Sleep (10000); } THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Setting current_object_key to %p", __func__, GetCurrentThreadId (), thread)); SET_CURRENT_OBJECT (thread); mono_domain_set (domain, TRUE); mono_monitor_init_tls (); thread_adjust_static_data (thread); init_root_domain_thread (thread, current_thread); if (domain != mono_get_root_domain ()) set_current_thread_for_domain (domain, thread, current_thread); if (mono_thread_attach_cb) { guint8 *staddr; size_t stsize; mono_thread_get_stack_bounds (&staddr, &stsize); if (staddr == NULL) mono_thread_attach_cb (tid, &tid); else mono_thread_attach_cb (tid, staddr + stsize); } // FIXME: Need a separate callback mono_profiler_thread_start (tid); return current_thread; } void mono_thread_detach (MonoThread *thread) { g_return_if_fail (thread != NULL); THREAD_DEBUG (g_message ("%s: mono_thread_detach for %p (%"G_GSIZE_FORMAT")", __func__, thread, (gsize)thread->internal_thread->tid)); thread_cleanup (thread->internal_thread); SET_CURRENT_OBJECT (NULL); mono_domain_unset (); /* Don't need to CloseHandle this thread, even though we took a * reference in mono_thread_attach (), because the GC will do it * when the Thread object is finalised. */ } void mono_thread_exit () { MonoInternalThread *thread = mono_thread_internal_current (); THREAD_DEBUG (g_message ("%s: mono_thread_exit for %p (%"G_GSIZE_FORMAT")", __func__, thread, (gsize)thread->tid)); thread_cleanup (thread); SET_CURRENT_OBJECT (NULL); mono_domain_unset (); /* we could add a callback here for embedders to use. 
*/ if (mono_thread_get_main () && (thread == mono_thread_get_main ()->internal_thread)) exit (mono_environment_exitcode_get ()); ExitThread (-1); } void ves_icall_System_Threading_Thread_ConstructInternalThread (MonoThread *this) { MonoInternalThread *internal = create_internal_thread_object (); internal->state = ThreadState_Unstarted; internal->apartment_state = ThreadApartmentState_Unknown; InterlockedCompareExchangePointer ((gpointer)&this->internal_thread, internal, NULL); } HANDLE ves_icall_System_Threading_Thread_Thread_internal(MonoThread *this, MonoObject *start) { guint32 (*start_func)(void *); struct StartInfo *start_info; HANDLE thread; gsize tid; MonoInternalThread *internal; THREAD_DEBUG (g_message("%s: Trying to start a new thread: this (%p) start (%p)", __func__, this, start)); if (!this->internal_thread) ves_icall_System_Threading_Thread_ConstructInternalThread (this); internal = this->internal_thread; ensure_synch_cs_set (internal); EnterCriticalSection (internal->synch_cs); if ((internal->state & ThreadState_Unstarted) == 0) { LeaveCriticalSection (internal->synch_cs); mono_raise_exception (mono_get_exception_thread_state ("Thread has already been started.")); return NULL; } internal->small_id = -1; if ((internal->state & ThreadState_Aborted) != 0) { LeaveCriticalSection (internal->synch_cs); return this; } start_func = NULL; { /* This is freed in start_wrapper */ start_info = g_new0 (struct StartInfo, 1); start_info->func = start_func; start_info->start_arg = this->start_obj; /* FIXME: GC object stored in unmanaged memory */ start_info->delegate = start; start_info->obj = this; g_assert (this->obj.vtable->domain == mono_domain_get ()); internal->start_notify=CreateSemaphore (NULL, 0, 0x7fffffff, NULL); if (internal->start_notify==NULL) { LeaveCriticalSection (internal->synch_cs); g_warning ("%s: CreateSemaphore error 0x%x", __func__, GetLastError ()); g_free (start_info); return(NULL); } mono_threads_lock (); register_thread_start_argument (this, start_info); if (threads_starting_up == NULL) { MONO_GC_REGISTER_ROOT_FIXED (threads_starting_up); threads_starting_up = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_VALUE_GC); } mono_g_hash_table_insert (threads_starting_up, this, this); mono_threads_unlock (); thread=mono_create_thread(NULL, default_stacksize_for_thread (internal), (LPTHREAD_START_ROUTINE)start_wrapper, start_info, CREATE_SUSPENDED, &tid); if(thread==NULL) { LeaveCriticalSection (internal->synch_cs); mono_threads_lock (); mono_g_hash_table_remove (threads_starting_up, this); mono_threads_unlock (); g_warning("%s: CreateThread error 0x%x", __func__, GetLastError()); return(NULL); } internal->handle=thread; internal->tid=tid; internal->small_id = mono_thread_small_id_alloc (); internal->thread_pinning_ref = internal; MONO_GC_REGISTER_ROOT (internal->thread_pinning_ref); /* Don't call handle_store() here, delay it to Start. * We can't join a thread (trying to will just block * forever) until it actually starts running, so don't * store the handle till then. 
*/ mono_thread_start (this); internal->state &= ~ThreadState_Unstarted; THREAD_DEBUG (g_message ("%s: Started thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread)); LeaveCriticalSection (internal->synch_cs); return(thread); } } void ves_icall_System_Threading_InternalThread_Thread_free_internal (MonoInternalThread *this, HANDLE thread) { MONO_ARCH_SAVE_REGS; THREAD_DEBUG (g_message ("%s: Closing thread %p, handle %p", __func__, this, thread)); if (thread) CloseHandle (thread); if (this->synch_cs) { CRITICAL_SECTION *synch_cs = this->synch_cs; this->synch_cs = NULL; DeleteCriticalSection (synch_cs); g_free (synch_cs); } if (this->name) { void *name = this->name; this->name = NULL; g_free (name); } } static void mono_thread_start (MonoThread *thread) { MonoInternalThread *internal = thread->internal_thread; THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Launching thread %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid)); /* Only store the handle when the thread is about to be * launched, to avoid the main thread deadlocking while trying * to clean up a thread that will never be signalled. */ if (!handle_store (thread)) return; ResumeThread (internal->handle); if(internal->start_notify!=NULL) { /* Wait for the thread to set up its TLS data etc, so * theres no potential race condition if someone tries * to look up the data believing the thread has * started */ THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") waiting for thread %p (%"G_GSIZE_FORMAT") to start", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid)); WaitForSingleObjectEx (internal->start_notify, INFINITE, FALSE); CloseHandle (internal->start_notify); internal->start_notify = NULL; } THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Done launching thread %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid)); } void ves_icall_System_Threading_Thread_Sleep_internal(gint32 ms) { guint32 res; MonoInternalThread *thread = mono_thread_internal_current (); THREAD_DEBUG (g_message ("%s: Sleeping for %d ms", __func__, ms)); mono_thread_current_check_pending_interrupt (); while (TRUE) { mono_thread_set_state (thread, ThreadState_WaitSleepJoin); res = SleepEx(ms,TRUE); mono_thread_clr_state (thread, ThreadState_WaitSleepJoin); if (res == WAIT_IO_COMPLETION) { /* we might have been interrupted */ MonoException* exc = mono_thread_execute_interruption (thread); if (exc) { mono_raise_exception (exc); } else { // FIXME: !INFINITE if (ms != INFINITE) break; } } else { break; } } } void ves_icall_System_Threading_Thread_SpinWait_nop (void) { } gint32 ves_icall_System_Threading_Thread_GetDomainID (void) { MONO_ARCH_SAVE_REGS; return mono_domain_get()->domain_id; } gboolean ves_icall_System_Threading_Thread_Yield (void) { #ifdef HOST_WIN32 return SwitchToThread (); #else return sched_yield () == 0; #endif } /* * mono_thread_get_name: * * Return the name of the thread. NAME_LEN is set to the length of the name. * Return NULL if the thread has no name. The returned memory is owned by the * caller. 
*/ gunichar2* mono_thread_get_name (MonoInternalThread *this_obj, guint32 *name_len) { gunichar2 *res; ensure_synch_cs_set (this_obj); EnterCriticalSection (this_obj->synch_cs); if (!this_obj->name) { *name_len = 0; res = NULL; } else { *name_len = this_obj->name_len; res = g_new (gunichar2, this_obj->name_len); memcpy (res, this_obj->name, sizeof (gunichar2) * this_obj->name_len); } LeaveCriticalSection (this_obj->synch_cs); return res; } MonoString* ves_icall_System_Threading_Thread_GetName_internal (MonoInternalThread *this_obj) { MonoString* str; ensure_synch_cs_set (this_obj); EnterCriticalSection (this_obj->synch_cs); if (!this_obj->name) str = NULL; else str = mono_string_new_utf16 (mono_domain_get (), this_obj->name, this_obj->name_len); LeaveCriticalSection (this_obj->synch_cs); return str; } void ves_icall_System_Threading_Thread_SetName_internal (MonoInternalThread *this_obj, MonoString *name) { ensure_synch_cs_set (this_obj); EnterCriticalSection (this_obj->synch_cs); if (this_obj->name) { LeaveCriticalSection (this_obj->synch_cs); mono_raise_exception (mono_get_exception_invalid_operation ("Thread.Name can only be set once.")); return; } if (name) { this_obj->name = g_new (gunichar2, mono_string_length (name)); memcpy (this_obj->name, mono_string_chars (name), mono_string_length (name) * 2); this_obj->name_len = mono_string_length (name); } else this_obj->name = NULL; LeaveCriticalSection (this_obj->synch_cs); if (this_obj->name) { char *tname = mono_string_to_utf8 (name); mono_profiler_thread_name (this_obj->tid, tname); mono_free (tname); } } /* If the array is already in the requested domain, we just return it, otherwise we return a copy in that domain. */ static MonoArray* byte_array_to_domain (MonoArray *arr, MonoDomain *domain) { MonoArray *copy; if (!arr) return NULL; if (mono_object_domain (arr) == domain) return arr; copy = mono_array_new (domain, mono_defaults.byte_class, arr->max_length); memcpy (mono_array_addr (copy, guint8, 0), mono_array_addr (arr, guint8, 0), arr->max_length); return copy; } MonoArray* ves_icall_System_Threading_Thread_ByteArrayToRootDomain (MonoArray *arr) { return byte_array_to_domain (arr, mono_get_root_domain ()); } MonoArray* ves_icall_System_Threading_Thread_ByteArrayToCurrentDomain (MonoArray *arr) { return byte_array_to_domain (arr, mono_domain_get ()); } MonoThread * mono_thread_current (void) { MonoDomain *domain = mono_domain_get (); MonoInternalThread *internal = mono_thread_internal_current (); MonoThread **current_thread_ptr; g_assert (internal); current_thread_ptr = get_current_thread_ptr_for_domain (domain, internal); if (!*current_thread_ptr) { g_assert (domain != mono_get_root_domain ()); *current_thread_ptr = new_thread_with_internal (domain, internal); } return *current_thread_ptr; } MonoInternalThread* mono_thread_internal_current (void) { MonoInternalThread *res = GET_CURRENT_OBJECT (); THREAD_DEBUG (g_message ("%s: returning %p", __func__, res)); return res; } gboolean ves_icall_System_Threading_Thread_Join_internal(MonoInternalThread *this, int ms, HANDLE thread) { MonoInternalThread *cur_thread = mono_thread_internal_current (); gboolean ret; mono_thread_current_check_pending_interrupt (); ensure_synch_cs_set (this); EnterCriticalSection (this->synch_cs); if ((this->state & ThreadState_Unstarted) != 0) { LeaveCriticalSection (this->synch_cs); mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started.")); return FALSE; } LeaveCriticalSection (this->synch_cs); if(ms== -1) { ms=INFINITE; } 
THREAD_DEBUG (g_message ("%s: joining thread handle %p, %d ms", __func__, thread, ms)); mono_thread_set_state (cur_thread, ThreadState_WaitSleepJoin); ret=WaitForSingleObjectEx (thread, ms, TRUE); mono_thread_clr_state (cur_thread, ThreadState_WaitSleepJoin); if(ret==WAIT_OBJECT_0) { THREAD_DEBUG (g_message ("%s: join successful", __func__)); return(TRUE); } THREAD_DEBUG (g_message ("%s: join failed", __func__)); return(FALSE); } /* FIXME: exitContext isn't documented */ gboolean ves_icall_System_Threading_WaitHandle_WaitAll_internal(MonoArray *mono_handles, gint32 ms, gboolean exitContext) { HANDLE *handles; guint32 numhandles; guint32 ret; guint32 i; MonoObject *waitHandle; MonoInternalThread *thread = mono_thread_internal_current (); /* Do this WaitSleepJoin check before creating objects */ mono_thread_current_check_pending_interrupt (); numhandles = mono_array_length(mono_handles); handles = g_new0(HANDLE, numhandles); for(i = 0; i < numhandles; i++) { waitHandle = mono_array_get(mono_handles, MonoObject*, i); handles [i] = mono_wait_handle_get_handle ((MonoWaitHandle *) waitHandle); } if(ms== -1) { ms=INFINITE; } mono_thread_set_state (thread, ThreadState_WaitSleepJoin); ret=WaitForMultipleObjectsEx(numhandles, handles, TRUE, ms, TRUE); mono_thread_clr_state (thread, ThreadState_WaitSleepJoin); g_free(handles); if(ret==WAIT_FAILED) { THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait failed", __func__, GetCurrentThreadId ())); return(FALSE); } else if(ret==WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION) { /* Do we want to try again if we get * WAIT_IO_COMPLETION? The documentation for * WaitHandle doesn't give any clues. (We'd have to * fiddle with the timeout if we retry.) */ THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait timed out", __func__, GetCurrentThreadId ())); return(FALSE); } return(TRUE); } /* FIXME: exitContext isn't documented */ gint32 ves_icall_System_Threading_WaitHandle_WaitAny_internal(MonoArray *mono_handles, gint32 ms, gboolean exitContext) { HANDLE handles [MAXIMUM_WAIT_OBJECTS]; guint32 numhandles; guint32 ret; guint32 i; MonoObject *waitHandle; MonoInternalThread *thread = mono_thread_internal_current (); guint32 start; /* Do this WaitSleepJoin check before creating objects */ mono_thread_current_check_pending_interrupt (); numhandles = mono_array_length(mono_handles); if (numhandles > MAXIMUM_WAIT_OBJECTS) return WAIT_FAILED; for(i = 0; i < numhandles; i++) { waitHandle = mono_array_get(mono_handles, MonoObject*, i); handles [i] = mono_wait_handle_get_handle ((MonoWaitHandle *) waitHandle); } if(ms== -1) { ms=INFINITE; } mono_thread_set_state (thread, ThreadState_WaitSleepJoin); start = (ms == -1) ? 0 : mono_msec_ticks (); do { ret = WaitForMultipleObjectsEx (numhandles, handles, FALSE, ms, TRUE); if (ret != WAIT_IO_COMPLETION) break; if (ms != -1) { guint32 diff; diff = mono_msec_ticks () - start; ms -= diff; if (ms <= 0) break; } } while (ms == -1 || ms > 0); mono_thread_clr_state (thread, ThreadState_WaitSleepJoin); THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") returning %d", __func__, GetCurrentThreadId (), ret)); /* * These need to be here. See MSDN docs on WaitForMultipleObjects.
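 * WaitForMultipleObjectsEx reports the signalled handle as WAIT_OBJECT_0 + index
 * (or WAIT_ABANDONED_0 + index for an abandoned mutex), so the branches below
 * translate the result back into a plain array index; timeouts and failures are
 * returned to managed code unchanged.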
*/ if (ret >= WAIT_OBJECT_0 && ret <= WAIT_OBJECT_0 + numhandles - 1) { return ret - WAIT_OBJECT_0; } else if (ret >= WAIT_ABANDONED_0 && ret <= WAIT_ABANDONED_0 + numhandles - 1) { return ret - WAIT_ABANDONED_0; } else { return ret; } } /* FIXME: exitContext isnt documented */ gboolean ves_icall_System_Threading_WaitHandle_WaitOne_internal(MonoObject *this, HANDLE handle, gint32 ms, gboolean exitContext) { guint32 ret; MonoInternalThread *thread = mono_thread_internal_current (); THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") waiting for %p, %d ms", __func__, GetCurrentThreadId (), handle, ms)); if(ms== -1) { ms=INFINITE; } mono_thread_current_check_pending_interrupt (); mono_thread_set_state (thread, ThreadState_WaitSleepJoin); ret=WaitForSingleObjectEx (handle, ms, TRUE); mono_thread_clr_state (thread, ThreadState_WaitSleepJoin); if(ret==WAIT_FAILED) { THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait failed", __func__, GetCurrentThreadId ())); return(FALSE); } else if(ret==WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION) { /* Do we want to try again if we get * WAIT_IO_COMPLETION? The documentation for * WaitHandle doesn't give any clues. (We'd have to * fiddle with the timeout if we retry.) */ THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait timed out", __func__, GetCurrentThreadId ())); return(FALSE); } return(TRUE); } gboolean ves_icall_System_Threading_WaitHandle_SignalAndWait_Internal (HANDLE toSignal, HANDLE toWait, gint32 ms, gboolean exitContext) { guint32 ret; MonoInternalThread *thread = mono_thread_internal_current (); MONO_ARCH_SAVE_REGS; if (ms == -1) ms = INFINITE; mono_thread_current_check_pending_interrupt (); mono_thread_set_state (thread, ThreadState_WaitSleepJoin); ret = SignalObjectAndWait (toSignal, toWait, ms, TRUE); mono_thread_clr_state (thread, ThreadState_WaitSleepJoin); return (!(ret == WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION || ret == WAIT_FAILED)); } HANDLE ves_icall_System_Threading_Mutex_CreateMutex_internal (MonoBoolean owned, MonoString *name, MonoBoolean *created) { HANDLE mutex; MONO_ARCH_SAVE_REGS; *created = TRUE; if (name == NULL) { mutex = CreateMutex (NULL, owned, NULL); } else { mutex = CreateMutex (NULL, owned, mono_string_chars (name)); if (GetLastError () == ERROR_ALREADY_EXISTS) { *created = FALSE; } } return(mutex); } MonoBoolean ves_icall_System_Threading_Mutex_ReleaseMutex_internal (HANDLE handle ) { MONO_ARCH_SAVE_REGS; return(ReleaseMutex (handle)); } HANDLE ves_icall_System_Threading_Mutex_OpenMutex_internal (MonoString *name, gint32 rights, gint32 *error) { HANDLE ret; MONO_ARCH_SAVE_REGS; *error = ERROR_SUCCESS; ret = OpenMutex (rights, FALSE, mono_string_chars (name)); if (ret == NULL) { *error = GetLastError (); } return(ret); } HANDLE ves_icall_System_Threading_Semaphore_CreateSemaphore_internal (gint32 initialCount, gint32 maximumCount, MonoString *name, MonoBoolean *created) { HANDLE sem; MONO_ARCH_SAVE_REGS; *created = TRUE; if (name == NULL) { sem = CreateSemaphore (NULL, initialCount, maximumCount, NULL); } else { sem = CreateSemaphore (NULL, initialCount, maximumCount, mono_string_chars (name)); if (GetLastError () == ERROR_ALREADY_EXISTS) { *created = FALSE; } } return(sem); } gint32 ves_icall_System_Threading_Semaphore_ReleaseSemaphore_internal (HANDLE handle, gint32 releaseCount, MonoBoolean *fail) { gint32 prevcount; MONO_ARCH_SAVE_REGS; *fail = !ReleaseSemaphore (handle, releaseCount, &prevcount); return (prevcount); } HANDLE ves_icall_System_Threading_Semaphore_OpenSemaphore_internal 
(MonoString *name, gint32 rights, gint32 *error) { HANDLE ret; MONO_ARCH_SAVE_REGS; *error = ERROR_SUCCESS; ret = OpenSemaphore (rights, FALSE, mono_string_chars (name)); if (ret == NULL) { *error = GetLastError (); } return(ret); } HANDLE ves_icall_System_Threading_Events_CreateEvent_internal (MonoBoolean manual, MonoBoolean initial, MonoString *name, MonoBoolean *created) { HANDLE event; MONO_ARCH_SAVE_REGS; *created = TRUE; if (name == NULL) { event = CreateEvent (NULL, manual, initial, NULL); } else { event = CreateEvent (NULL, manual, initial, mono_string_chars (name)); if (GetLastError () == ERROR_ALREADY_EXISTS) { *created = FALSE; } } return(event); } gboolean ves_icall_System_Threading_Events_SetEvent_internal (HANDLE handle) { MONO_ARCH_SAVE_REGS; return (SetEvent(handle)); } gboolean ves_icall_System_Threading_Events_ResetEvent_internal (HANDLE handle) { MONO_ARCH_SAVE_REGS; return (ResetEvent(handle)); } void ves_icall_System_Threading_Events_CloseEvent_internal (HANDLE handle) { MONO_ARCH_SAVE_REGS; CloseHandle (handle); } HANDLE ves_icall_System_Threading_Events_OpenEvent_internal (MonoString *name, gint32 rights, gint32 *error) { HANDLE ret; MONO_ARCH_SAVE_REGS; *error = ERROR_SUCCESS; ret = OpenEvent (rights, FALSE, mono_string_chars (name)); if (ret == NULL) { *error = GetLastError (); } return(ret); } gint32 ves_icall_System_Threading_Interlocked_Increment_Int (gint32 *location) { MONO_ARCH_SAVE_REGS; return InterlockedIncrement (location); } gint64 ves_icall_System_Threading_Interlocked_Increment_Long (gint64 *location) { gint64 ret; MONO_ARCH_SAVE_REGS; mono_interlocked_lock (); ret = ++ *location; mono_interlocked_unlock (); return ret; } gint32 ves_icall_System_Threading_Interlocked_Decrement_Int (gint32 *location) { MONO_ARCH_SAVE_REGS; return InterlockedDecrement(location); } gint64 ves_icall_System_Threading_Interlocked_Decrement_Long (gint64 * location) { gint64 ret; MONO_ARCH_SAVE_REGS; mono_interlocked_lock (); ret = -- *location; mono_interlocked_unlock (); return ret; } gint32 ves_icall_System_Threading_Interlocked_Exchange_Int (gint32 *location, gint32 value) { MONO_ARCH_SAVE_REGS; return InterlockedExchange(location, value); } MonoObject * ves_icall_System_Threading_Interlocked_Exchange_Object (MonoObject **location, MonoObject *value) { MonoObject *res; res = (MonoObject *) InterlockedExchangePointer((gpointer *) location, value); mono_gc_wbarrier_generic_nostore (location); return res; } gpointer ves_icall_System_Threading_Interlocked_Exchange_IntPtr (gpointer *location, gpointer value) { return InterlockedExchangePointer(location, value); } gfloat ves_icall_System_Threading_Interlocked_Exchange_Single (gfloat *location, gfloat value) { IntFloatUnion val, ret; MONO_ARCH_SAVE_REGS; val.fval = value; ret.ival = InterlockedExchange((gint32 *) location, val.ival); return ret.fval; } gint64 ves_icall_System_Threading_Interlocked_Exchange_Long (gint64 *location, gint64 value) { #if SIZEOF_VOID_P == 8 return (gint64) InterlockedExchangePointer((gpointer *) location, (gpointer)value); #else gint64 res; /* * According to MSDN, this function is only atomic with regards to the * other Interlocked functions on 32 bit platforms. 
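 * (Hence the 32-bit fallback below serialises the read and the write through
 * mono_interlocked_lock () instead of relying on a single hardware exchange.)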
*/ mono_interlocked_lock (); res = *location; *location = value; mono_interlocked_unlock (); return res; #endif } gdouble ves_icall_System_Threading_Interlocked_Exchange_Double (gdouble *location, gdouble value) { #if SIZEOF_VOID_P == 8 LongDoubleUnion val, ret; val.fval = value; ret.ival = (gint64)InterlockedExchangePointer((gpointer *) location, (gpointer)val.ival); return ret.fval; #else gdouble res; /* * According to MSDN, this function is only atomic with regards to the * other Interlocked functions on 32 bit platforms. */ mono_interlocked_lock (); res = *location; *location = value; mono_interlocked_unlock (); return res; #endif } gint32 ves_icall_System_Threading_Interlocked_CompareExchange_Int(gint32 *location, gint32 value, gint32 comparand) { MONO_ARCH_SAVE_REGS; return InterlockedCompareExchange(location, value, comparand); } MonoObject * ves_icall_System_Threading_Interlocked_CompareExchange_Object (MonoObject **location, MonoObject *value, MonoObject *comparand) { MonoObject *res; res = (MonoObject *) InterlockedCompareExchangePointer((gpointer *) location, value, comparand); mono_gc_wbarrier_generic_nostore (location); return res; } gpointer ves_icall_System_Threading_Interlocked_CompareExchange_IntPtr(gpointer *location, gpointer value, gpointer comparand) { return InterlockedCompareExchangePointer(location, value, comparand); } gfloat ves_icall_System_Threading_Interlocked_CompareExchange_Single (gfloat *location, gfloat value, gfloat comparand) { IntFloatUnion val, ret, cmp; MONO_ARCH_SAVE_REGS; val.fval = value; cmp.fval = comparand; ret.ival = InterlockedCompareExchange((gint32 *) location, val.ival, cmp.ival); return ret.fval; } gdouble ves_icall_System_Threading_Interlocked_CompareExchange_Double (gdouble *location, gdouble value, gdouble comparand) { #if SIZEOF_VOID_P == 8 LongDoubleUnion val, comp, ret; val.fval = value; comp.fval = comparand; ret.ival = (gint64)InterlockedCompareExchangePointer((gpointer *) location, (gpointer)val.ival, (gpointer)comp.ival); return ret.fval; #else gdouble old; mono_interlocked_lock (); old = *location; if (old == comparand) *location = value; mono_interlocked_unlock (); return old; #endif } gint64 ves_icall_System_Threading_Interlocked_CompareExchange_Long (gint64 *location, gint64 value, gint64 comparand) { #if SIZEOF_VOID_P == 8 return (gint64)InterlockedCompareExchangePointer((gpointer *) location, (gpointer)value, (gpointer)comparand); #else gint64 old; mono_interlocked_lock (); old = *location; if (old == comparand) *location = value; mono_interlocked_unlock (); return old; #endif } MonoObject* ves_icall_System_Threading_Interlocked_CompareExchange_T (MonoObject **location, MonoObject *value, MonoObject *comparand) { MonoObject *res; res = InterlockedCompareExchangePointer ((gpointer *)location, value, comparand); mono_gc_wbarrier_generic_nostore (location); return res; } MonoObject* ves_icall_System_Threading_Interlocked_Exchange_T (MonoObject **location, MonoObject *value) { MonoObject *res; res = InterlockedExchangePointer ((gpointer *)location, value); mono_gc_wbarrier_generic_nostore (location); return res; } gint32 ves_icall_System_Threading_Interlocked_Add_Int (gint32 *location, gint32 value) { #if SIZEOF_VOID_P == 8 /* Should be implemented as a JIT intrinsic */ mono_raise_exception (mono_get_exception_not_implemented (NULL)); return 0; #else gint32 orig; mono_interlocked_lock (); orig = *location; *location = orig + value; mono_interlocked_unlock (); return orig + value; #endif } gint64 
ves_icall_System_Threading_Interlocked_Add_Long (gint64 *location, gint64 value) { #if SIZEOF_VOID_P == 8 /* Should be implemented as a JIT intrinsic */ mono_raise_exception (mono_get_exception_not_implemented (NULL)); return 0; #else gint64 orig; mono_interlocked_lock (); orig = *location; *location = orig + value; mono_interlocked_unlock (); return orig + value; #endif } gint64 ves_icall_System_Threading_Interlocked_Read_Long (gint64 *location) { #if SIZEOF_VOID_P == 8 /* 64 bit reads are already atomic */ return *location; #else gint64 res; mono_interlocked_lock (); res = *location; mono_interlocked_unlock (); return res; #endif } void ves_icall_System_Threading_Thread_MemoryBarrier (void) { mono_threads_lock (); mono_threads_unlock (); } void ves_icall_System_Threading_Thread_ClrState (MonoInternalThread* this, guint32 state) { mono_thread_clr_state (this, state); if (state & ThreadState_Background) { /* If the thread changes the background mode, the main thread has to * be notified, since it has to rebuild the list of threads to * wait for. */ SetEvent (background_change_event); } } void ves_icall_System_Threading_Thread_SetState (MonoInternalThread* this, guint32 state) { mono_thread_set_state (this, state); if (state & ThreadState_Background) { /* If the thread changes the background mode, the main thread has to * be notified, since it has to rebuild the list of threads to * wait for. */ SetEvent (background_change_event); } } guint32 ves_icall_System_Threading_Thread_GetState (MonoInternalThread* this) { guint32 state; ensure_synch_cs_set (this); EnterCriticalSection (this->synch_cs); state = this->state; LeaveCriticalSection (this->synch_cs); return state; } void ves_icall_System_Threading_Thread_Interrupt_internal (MonoInternalThread *this) { gboolean throw = FALSE; ensure_synch_cs_set (this); if (this == mono_thread_internal_current ()) return; EnterCriticalSection (this->synch_cs); this->thread_interrupt_requested = TRUE; if (this->state & ThreadState_WaitSleepJoin) { throw = TRUE; } LeaveCriticalSection (this->synch_cs); if (throw) { signal_thread_state_change (this); } } void mono_thread_current_check_pending_interrupt () { MonoInternalThread *thread = mono_thread_internal_current (); gboolean throw = FALSE; mono_debugger_check_interruption (); ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if (thread->thread_interrupt_requested) { throw = TRUE; thread->thread_interrupt_requested = FALSE; } LeaveCriticalSection (thread->synch_cs); if (throw) { mono_raise_exception (mono_get_exception_thread_interrupted ()); } } int mono_thread_get_abort_signal (void) { #ifdef HOST_WIN32 return -1; #else #ifndef SIGRTMIN #ifdef SIGUSR1 return SIGUSR1; #else return -1; #endif #else static int abort_signum = -1; int i; if (abort_signum != -1) return abort_signum; /* we try to avoid SIGRTMIN and any one that might have been set already, see bug #75387 */ for (i = SIGRTMIN + 1; i < SIGRTMAX; ++i) { struct sigaction sinfo; sigaction (i, NULL, &sinfo); if (sinfo.sa_handler == SIG_DFL && (void*)sinfo.sa_sigaction == (void*)SIG_DFL) { abort_signum = i; return i; } } /* fallback to the old way */ return SIGRTMIN; #endif #endif /* HOST_WIN32 */ } #ifdef HOST_WIN32 static void CALLBACK interruption_request_apc (ULONG_PTR param) { MonoException* exc = mono_thread_request_interruption (FALSE); if (exc) mono_raise_exception (exc); } #endif /* HOST_WIN32 */ /* * signal_thread_state_change * * Tells the thread that his state has changed and it has to enter the new * state as soon as 
possible. */ static void signal_thread_state_change (MonoInternalThread *thread) { if (thread == mono_thread_internal_current ()) { /* Do it synchronously */ MonoException *exc = mono_thread_request_interruption (FALSE); if (exc) mono_raise_exception (exc); } #ifdef HOST_WIN32 QueueUserAPC ((PAPCFUNC)interruption_request_apc, thread->handle, NULL); #else /* fixme: store the state somewhere */ mono_thread_kill (thread, mono_thread_get_abort_signal ()); /* * This will cause waits to be broken. * It will also prevent the thread from entering a wait, so if the thread returns * from the wait before it receives the abort signal, it will just spin in the wait * functions in the io-layer until the signal handler calls QueueUserAPC which will * make it return. */ wapi_interrupt_thread (thread->handle); #endif /* HOST_WIN32 */ } void ves_icall_System_Threading_Thread_Abort (MonoInternalThread *thread, MonoObject *state) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & ThreadState_AbortRequested) != 0 || (thread->state & ThreadState_StopRequested) != 0 || (thread->state & ThreadState_Stopped) != 0) { LeaveCriticalSection (thread->synch_cs); return; } if ((thread->state & ThreadState_Unstarted) != 0) { thread->state |= ThreadState_Aborted; LeaveCriticalSection (thread->synch_cs); return; } thread->state |= ThreadState_AbortRequested; if (thread->abort_state_handle) mono_gchandle_free (thread->abort_state_handle); if (state) { thread->abort_state_handle = mono_gchandle_new (state, FALSE); g_assert (thread->abort_state_handle); } else { thread->abort_state_handle = 0; } thread->abort_exc = NULL; /* * abort_exc is set in mono_thread_execute_interruption(), * triggered by the call to signal_thread_state_change(), * below. There's a point between where we have * abort_state_handle set, but abort_exc NULL, but that's not * a problem. 
*/ LeaveCriticalSection (thread->synch_cs); THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Abort requested for %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), thread, (gsize)thread->tid)); /* During shutdown, we can't wait for other threads */ if (!shutting_down) /* Make sure the thread is awake */ mono_thread_resume (thread); signal_thread_state_change (thread); } void ves_icall_System_Threading_Thread_ResetAbort (void) { MonoInternalThread *thread = mono_thread_internal_current (); gboolean was_aborting; ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); was_aborting = thread->state & ThreadState_AbortRequested; thread->state &= ~ThreadState_AbortRequested; LeaveCriticalSection (thread->synch_cs); if (!was_aborting) { const char *msg = "Unable to reset abort because no abort was requested"; mono_raise_exception (mono_get_exception_thread_state (msg)); } thread->abort_exc = NULL; if (thread->abort_state_handle) { mono_gchandle_free (thread->abort_state_handle); /* This is actually not necessary - the handle only counts if the exception is set */ thread->abort_state_handle = 0; } } void mono_thread_internal_reset_abort (MonoInternalThread *thread) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); thread->state &= ~ThreadState_AbortRequested; if (thread->abort_exc) { thread->abort_exc = NULL; if (thread->abort_state_handle) { mono_gchandle_free (thread->abort_state_handle); /* This is actually not necessary - the handle only counts if the exception is set */ thread->abort_state_handle = 0; } } LeaveCriticalSection (thread->synch_cs); } MonoObject* ves_icall_System_Threading_Thread_GetAbortExceptionState (MonoThread *this) { MonoInternalThread *thread = this->internal_thread; MonoObject *state, *deserialized = NULL, *exc; MonoDomain *domain; if (!thread->abort_state_handle) return NULL; state = mono_gchandle_get_target (thread->abort_state_handle); g_assert (state); domain = mono_domain_get (); if (mono_object_domain (state) == domain) return state; deserialized = mono_object_xdomain_representation (state, domain, &exc); if (!deserialized) { MonoException *invalid_op_exc = mono_get_exception_invalid_operation ("Thread.ExceptionState cannot access an ExceptionState from a different AppDomain"); if (exc) MONO_OBJECT_SETREF (invalid_op_exc, inner_ex, exc); mono_raise_exception (invalid_op_exc); } return deserialized; } static gboolean mono_thread_suspend (MonoInternalThread *thread) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & ThreadState_Unstarted) != 0 || (thread->state & ThreadState_Aborted) != 0 || (thread->state & ThreadState_Stopped) != 0) { LeaveCriticalSection (thread->synch_cs); return FALSE; } if ((thread->state & ThreadState_Suspended) != 0 || (thread->state & ThreadState_SuspendRequested) != 0 || (thread->state & ThreadState_StopRequested) != 0) { LeaveCriticalSection (thread->synch_cs); return TRUE; } thread->state |= ThreadState_SuspendRequested; LeaveCriticalSection (thread->synch_cs); signal_thread_state_change (thread); return TRUE; } void ves_icall_System_Threading_Thread_Suspend (MonoInternalThread *thread) { if (!mono_thread_suspend (thread)) mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started, or is dead.")); } static gboolean mono_thread_resume (MonoInternalThread *thread) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & ThreadState_SuspendRequested) != 0) { thread->state &= 
~ThreadState_SuspendRequested; LeaveCriticalSection (thread->synch_cs); return TRUE; } if ((thread->state & ThreadState_Suspended) == 0 || (thread->state & ThreadState_Unstarted) != 0 || (thread->state & ThreadState_Aborted) != 0 || (thread->state & ThreadState_Stopped) != 0) { LeaveCriticalSection (thread->synch_cs); return FALSE; } thread->resume_event = CreateEvent (NULL, TRUE, FALSE, NULL); if (thread->resume_event == NULL) { LeaveCriticalSection (thread->synch_cs); return(FALSE); } /* Awake the thread */ SetEvent (thread->suspend_event); LeaveCriticalSection (thread->synch_cs); /* Wait for the thread to awake */ WaitForSingleObject (thread->resume_event, INFINITE); CloseHandle (thread->resume_event); thread->resume_event = NULL; return TRUE; } void ves_icall_System_Threading_Thread_Resume (MonoThread *thread) { if (!thread->internal_thread || !mono_thread_resume (thread->internal_thread)) mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started, or is dead.")); } static gboolean find_wrapper (MonoMethod *m, gint no, gint ilo, gboolean managed, gpointer data) { if (managed) return TRUE; if (m->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE || m->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE || m->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH) { *((gboolean*)data) = TRUE; return TRUE; } return FALSE; } static gboolean is_running_protected_wrapper (void) { gboolean found = FALSE; mono_stack_walk (find_wrapper, &found); return found; } void mono_thread_internal_stop (MonoInternalThread *thread) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & ThreadState_StopRequested) != 0 || (thread->state & ThreadState_Stopped) != 0) { LeaveCriticalSection (thread->synch_cs); return; } /* Make sure the thread is awake */ mono_thread_resume (thread); thread->state |= ThreadState_StopRequested; thread->state &= ~ThreadState_AbortRequested; LeaveCriticalSection (thread->synch_cs); signal_thread_state_change (thread); } void mono_thread_stop (MonoThread *thread) { mono_thread_internal_stop (thread->internal_thread); } gint8 ves_icall_System_Threading_Thread_VolatileRead1 (void *ptr) { return *((volatile gint8 *) (ptr)); } gint16 ves_icall_System_Threading_Thread_VolatileRead2 (void *ptr) { return *((volatile gint16 *) (ptr)); } gint32 ves_icall_System_Threading_Thread_VolatileRead4 (void *ptr) { return *((volatile gint32 *) (ptr)); } gint64 ves_icall_System_Threading_Thread_VolatileRead8 (void *ptr) { return *((volatile gint64 *) (ptr)); } void * ves_icall_System_Threading_Thread_VolatileReadIntPtr (void *ptr) { return (void *) *((volatile void **) ptr); } void ves_icall_System_Threading_Thread_VolatileWrite1 (void *ptr, gint8 value) { *((volatile gint8 *) ptr) = value; } void ves_icall_System_Threading_Thread_VolatileWrite2 (void *ptr, gint16 value) { *((volatile gint16 *) ptr) = value; } void ves_icall_System_Threading_Thread_VolatileWrite4 (void *ptr, gint32 value) { *((volatile gint32 *) ptr) = value; } void ves_icall_System_Threading_Thread_VolatileWrite8 (void *ptr, gint64 value) { *((volatile gint64 *) ptr) = value; } void ves_icall_System_Threading_Thread_VolatileWriteIntPtr (void *ptr, void *value) { *((volatile void **) ptr) = value; } void ves_icall_System_Threading_Thread_VolatileWriteObject (void *ptr, void *value) { mono_gc_wbarrier_generic_store (ptr, value); } void mono_thread_init (MonoThreadStartCB start_cb, MonoThreadAttachCB attach_cb) { mono_thread_smr_init (); InitializeCriticalSection(&threads_mutex); 
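/* The remaining runtime-wide locks, the background-change event and the TLS bookkeeping below are created once here, during runtime initialisation. */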
InitializeCriticalSection(&interlocked_mutex); InitializeCriticalSection(&contexts_mutex); background_change_event = CreateEvent (NULL, TRUE, FALSE, NULL); g_assert(background_change_event != NULL); mono_init_static_data_info (&thread_static_info); mono_init_static_data_info (&context_static_info); MONO_FAST_TLS_INIT (tls_current_object); current_object_key=TlsAlloc(); THREAD_DEBUG (g_message ("%s: Allocated current_object_key %d", __func__, current_object_key)); mono_thread_start_cb = start_cb; mono_thread_attach_cb = attach_cb; /* Get a pseudo handle to the current process. This is just a * kludge so that wapi can build a process handle if needed. * As a pseudo handle is returned, we don't need to clean * anything up. */ GetCurrentProcess (); } void mono_thread_cleanup (void) { #if !defined(HOST_WIN32) && !defined(RUN_IN_SUBTHREAD) /* The main thread must abandon any held mutexes (particularly * important for named mutexes as they are shared across * processes, see bug 74680.) This will happen when the * thread exits, but if it's not running in a subthread it * won't exit in time. */ /* Using non-w32 API is a nasty kludge, but I couldn't find * anything in the documentation that would let me do this * here yet still be safe to call on windows. */ _wapi_thread_signal_self (mono_environment_exitcode_get ()); #endif #if 0 /* This stuff needs more testing, it seems one of these * critical sections can be locked when mono_thread_cleanup is * called. */ DeleteCriticalSection (&threads_mutex); DeleteCriticalSection (&interlocked_mutex); DeleteCriticalSection (&contexts_mutex); DeleteCriticalSection (&delayed_free_table_mutex); DeleteCriticalSection (&small_id_mutex); CloseHandle (background_change_event); #endif TlsFree (current_object_key); } void mono_threads_install_cleanup (MonoThreadCleanupFunc func) { mono_thread_cleanup_fn = func; } void mono_thread_set_manage_callback (MonoThread *thread, MonoThreadManageCallback func) { thread->internal_thread->manage_callback = func; } void mono_threads_install_notify_pending_exc (MonoThreadNotifyPendingExcFunc func) { mono_thread_notify_pending_exc_fn = func; } G_GNUC_UNUSED static void print_tids (gpointer key, gpointer value, gpointer user) { /* GPOINTER_TO_UINT breaks horribly if sizeof(void *) > * sizeof(uint) and a cast to uint would overflow */ /* Older versions of glib don't have G_GSIZE_FORMAT, so just * print this as a pointer. */ g_message ("Waiting for: %p", key); } struct wait_data { HANDLE handles[MAXIMUM_WAIT_OBJECTS]; MonoInternalThread *threads[MAXIMUM_WAIT_OBJECTS]; guint32 num; }; static void wait_for_tids (struct wait_data *wait, guint32 timeout) { guint32 i, ret; THREAD_DEBUG (g_message("%s: %d threads to wait for in this batch", __func__, wait->num)); ret=WaitForMultipleObjectsEx(wait->num, wait->handles, TRUE, timeout, TRUE); if(ret==WAIT_FAILED) { /* See the comment in build_wait_tids() */ THREAD_DEBUG (g_message ("%s: Wait failed", __func__)); return; } for(i=0; i<wait->num; i++) CloseHandle (wait->handles[i]); if (ret == WAIT_TIMEOUT) return; for(i=0; i<wait->num; i++) { gsize tid = wait->threads[i]->tid; mono_threads_lock (); if(mono_g_hash_table_lookup (threads, (gpointer)tid)!=NULL) { /* This thread must have been killed, because * it hasn't cleaned itself up. (It's just * possible that the thread exited before the * parent thread had a chance to store the * handle, and now there is another pointer to * the already-exited thread stored. 
In this * case, we'll just get two * mono_profiler_thread_end() calls for the * same thread.) */ mono_threads_unlock (); THREAD_DEBUG (g_message ("%s: cleaning up after thread %p (%"G_GSIZE_FORMAT")", __func__, wait->threads[i], tid)); thread_cleanup (wait->threads[i]); } else { mono_threads_unlock (); } } } static void wait_for_tids_or_state_change (struct wait_data *wait, guint32 timeout) { guint32 i, ret, count; THREAD_DEBUG (g_message("%s: %d threads to wait for in this batch", __func__, wait->num)); /* Add the thread state change event, so it wakes up if a thread changes * to background mode. */ count = wait->num; if (count < MAXIMUM_WAIT_OBJECTS) { wait->handles [count] = background_change_event; count++; } ret=WaitForMultipleObjectsEx (count, wait->handles, FALSE, timeout, TRUE); if(ret==WAIT_FAILED) { /* See the comment in build_wait_tids() */ THREAD_DEBUG (g_message ("%s: Wait failed", __func__)); return; } for(i=0; i<wait->num; i++) CloseHandle (wait->handles[i]); if (ret == WAIT_TIMEOUT) return; if (ret < wait->num) { gsize tid = wait->threads[ret]->tid; mono_threads_lock (); if (mono_g_hash_table_lookup (threads, (gpointer)tid)!=NULL) { /* See comment in wait_for_tids about thread cleanup */ mono_threads_unlock (); THREAD_DEBUG (g_message ("%s: cleaning up after thread %"G_GSIZE_FORMAT, __func__, tid)); thread_cleanup (wait->threads [ret]); } else mono_threads_unlock (); } } static void build_wait_tids (gpointer key, gpointer value, gpointer user) { struct wait_data *wait=(struct wait_data *)user; if(wait->num<MAXIMUM_WAIT_OBJECTS) { HANDLE handle; MonoInternalThread *thread=(MonoInternalThread *)value; /* Ignore background threads, we abort them later */ /* Do not lock here since it is not needed and the caller holds threads_lock */ if (thread->state & ThreadState_Background) { THREAD_DEBUG (g_message ("%s: ignoring background thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); return; /* just leave, ignore */ } if (mono_gc_is_finalizer_internal_thread (thread)) { THREAD_DEBUG (g_message ("%s: ignoring finalizer thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); return; } if (thread == mono_thread_internal_current ()) { THREAD_DEBUG (g_message ("%s: ignoring current thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); return; } if (mono_thread_get_main () && (thread == mono_thread_get_main ()->internal_thread)) { THREAD_DEBUG (g_message ("%s: ignoring main thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); return; } if (thread->flags & MONO_THREAD_FLAG_DONT_MANAGE) { THREAD_DEBUG (g_message ("%s: ignoring thread %" G_GSIZE_FORMAT "with DONT_MANAGE flag set.", __func__, (gsize)thread->tid)); return; } handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid); if (handle == NULL) { THREAD_DEBUG (g_message ("%s: ignoring unopenable thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); return; } THREAD_DEBUG (g_message ("%s: Invoking mono_thread_manage callback on thread %p", __func__, thread)); if ((thread->manage_callback == NULL) || (thread->manage_callback (thread->root_domain_thread) == TRUE)) { wait->handles[wait->num]=handle; wait->threads[wait->num]=thread; wait->num++; THREAD_DEBUG (g_message ("%s: adding thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); } else { THREAD_DEBUG (g_message ("%s: ignoring (because of callback) thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid)); } } else { /* Just ignore the rest, we can't do anything with * them yet */ } } static gboolean remove_and_abort_threads (gpointer key, gpointer value, gpointer 
user) { struct wait_data *wait=(struct wait_data *)user; gsize self = GetCurrentThreadId (); MonoInternalThread *thread = value; HANDLE handle; if (wait->num >= MAXIMUM_WAIT_OBJECTS) return FALSE; /* The finalizer thread is not a background thread */ if (thread->tid != self && (thread->state & ThreadState_Background) != 0 && !(thread->flags & MONO_THREAD_FLAG_DONT_MANAGE)) { handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid); if (handle == NULL) return FALSE; /* printf ("A: %d\n", wait->num); */ wait->handles[wait->num]=thread->handle; wait->threads[wait->num]=thread; wait->num++; THREAD_DEBUG (g_print ("%s: Aborting id: %"G_GSIZE_FORMAT"\n", __func__, (gsize)thread->tid)); mono_thread_internal_stop (thread); return TRUE; } return (thread->tid != self && !mono_gc_is_finalizer_internal_thread (thread)); } /** * mono_threads_set_shutting_down: * * Is called by a thread that wants to shut down Mono. If the runtime is already * shutting down, the calling thread is suspended/stopped, and this function never * returns. */ void mono_threads_set_shutting_down (void) { MonoInternalThread *current_thread = mono_thread_internal_current (); mono_threads_lock (); if (shutting_down) { mono_threads_unlock (); /* Make sure we're properly suspended/stopped */ EnterCriticalSection (current_thread->synch_cs); if ((current_thread->state & ThreadState_SuspendRequested) || (current_thread->state & ThreadState_AbortRequested) || (current_thread->state & ThreadState_StopRequested)) { LeaveCriticalSection (current_thread->synch_cs); mono_thread_execute_interruption (current_thread); } else { current_thread->state |= ThreadState_Stopped; LeaveCriticalSection (current_thread->synch_cs); } /*since we're killing the thread, unset the current domain.*/ mono_domain_unset (); /* Wake up other threads potentially waiting for us */ ExitThread (0); } else { shutting_down = TRUE; /* Not really a background state change, but this will * interrupt the main thread if it is waiting for all * the other threads. */ SetEvent (background_change_event); mono_threads_unlock (); } } /** * mono_threads_is_shutting_down: * * Returns whether a thread has commenced shutdown of Mono. Note that * if the function returns FALSE the caller must not assume that * shutdown is not in progress, because the situation might have * changed since the function returned. For that reason this function * is of very limited utility. 
*/ gboolean mono_threads_is_shutting_down (void) { return shutting_down; } void mono_thread_manage (void) { struct wait_data wait_data; struct wait_data *wait = &wait_data; memset (wait, 0, sizeof (struct wait_data)); /* join each thread that's still running */ THREAD_DEBUG (g_message ("%s: Joining each running thread...", __func__)); mono_threads_lock (); if(threads==NULL) { THREAD_DEBUG (g_message("%s: No threads", __func__)); mono_threads_unlock (); return; } mono_threads_unlock (); do { mono_threads_lock (); if (shutting_down) { /* somebody else is shutting down */ mono_threads_unlock (); break; } THREAD_DEBUG (g_message ("%s: There are %d threads to join", __func__, mono_g_hash_table_size (threads)); mono_g_hash_table_foreach (threads, print_tids, NULL)); ResetEvent (background_change_event); wait->num=0; /*We must zero all InternalThread pointers to avoid making the GC unhappy.*/ memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P); mono_g_hash_table_foreach (threads, build_wait_tids, wait); mono_threads_unlock (); if(wait->num>0) { /* Something to wait for */ wait_for_tids_or_state_change (wait, INFINITE); } THREAD_DEBUG (g_message ("%s: I have %d threads after waiting.", __func__, wait->num)); } while(wait->num>0); mono_threads_set_shutting_down (); /* No new threads will be created after this point */ mono_runtime_set_shutting_down (); THREAD_DEBUG (g_message ("%s: threadpool cleanup", __func__)); mono_thread_pool_cleanup (); /* * Remove everything but the finalizer thread and self. * Also abort all the background threads * */ do { mono_threads_lock (); wait->num = 0; /*We must zero all InternalThread pointers to avoid making the GC unhappy.*/ memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P); mono_g_hash_table_foreach_remove (threads, remove_and_abort_threads, wait); mono_threads_unlock (); THREAD_DEBUG (g_message ("%s: wait->num is now %d", __func__, wait->num)); if(wait->num>0) { /* Something to wait for */ wait_for_tids (wait, INFINITE); } } while (wait->num > 0); /* * give the subthreads a chance to really quit (this is mainly needed * to get correct user and system times from getrusage/wait/time(1)). * This could be removed if we avoid pthread_detach() and use pthread_join(). */ #ifndef HOST_WIN32 sched_yield (); #endif } static void terminate_thread (gpointer key, gpointer value, gpointer user) { MonoInternalThread *thread=(MonoInternalThread *)value; if(thread->tid != (gsize)user) { /*TerminateThread (thread->handle, -1);*/ } } void mono_thread_abort_all_other_threads (void) { gsize self = GetCurrentThreadId (); mono_threads_lock (); THREAD_DEBUG (g_message ("%s: There are %d threads to abort", __func__, mono_g_hash_table_size (threads)); mono_g_hash_table_foreach (threads, print_tids, NULL)); mono_g_hash_table_foreach (threads, terminate_thread, (gpointer)self); mono_threads_unlock (); } static void collect_threads_for_suspend (gpointer key, gpointer value, gpointer user_data) { MonoInternalThread *thread = (MonoInternalThread*)value; struct wait_data *wait = (struct wait_data*)user_data; HANDLE handle; /* * We try to exclude threads early, to avoid running into the MAXIMUM_WAIT_OBJECTS * limitation. * This needs no locking. 
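 * (The state word is only read here; a stale value at worst lets a thread slip
 * through to the per-thread checks that are done later under its synch_cs.)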
*/ if ((thread->state & ThreadState_Suspended) != 0 || (thread->state & ThreadState_Stopped) != 0) return; if (wait->num<MAXIMUM_WAIT_OBJECTS) { handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid); if (handle == NULL) return; wait->handles [wait->num] = handle; wait->threads [wait->num] = thread; wait->num++; } } /* * mono_thread_suspend_all_other_threads: * * Suspend all managed threads except the finalizer thread and this thread. It is * not possible to resume them later. */ void mono_thread_suspend_all_other_threads (void) { struct wait_data wait_data; struct wait_data *wait = &wait_data; int i; gsize self = GetCurrentThreadId (); gpointer *events; guint32 eventidx = 0; gboolean starting, finished; memset (wait, 0, sizeof (struct wait_data)); /* * The other threads could be in an arbitrary state at this point, i.e. * they could be starting up, shutting down etc. This means that there could be * threads which are not even in the threads hash table yet. */ /* * First we set a barrier which will be checked by all threads before they * are added to the threads hash table, and they will exit if the flag is set. * This ensures that no threads could be added to the hash later. * We will use shutting_down as the barrier for now. */ g_assert (shutting_down); /* * We make multiple calls to WaitForMultipleObjects since: * - we can only wait for MAXIMUM_WAIT_OBJECTS threads * - some threads could exit without becoming suspended */ finished = FALSE; while (!finished) { /* * Make a copy of the hashtable since we can't do anything with * threads while threads_mutex is held. */ wait->num = 0; /*We must zero all InternalThread pointers to avoid making the GC unhappy.*/ memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P); mono_threads_lock (); mono_g_hash_table_foreach (threads, collect_threads_for_suspend, wait); mono_threads_unlock (); events = g_new0 (gpointer, wait->num); eventidx = 0; /* Get the suspended events that we'll be waiting for */ for (i = 0; i < wait->num; ++i) { MonoInternalThread *thread = wait->threads [i]; gboolean signal_suspend = FALSE; if ((thread->tid == self) || mono_gc_is_finalizer_internal_thread (thread) || (thread->flags & MONO_THREAD_FLAG_DONT_MANAGE)) { //CloseHandle (wait->handles [i]); wait->threads [i] = NULL; /* ignore this thread in next loop */ continue; } ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if (thread->suspended_event == NULL) { thread->suspended_event = CreateEvent (NULL, TRUE, FALSE, NULL); if (thread->suspended_event == NULL) { /* Forget this one and go on to the next */ LeaveCriticalSection (thread->synch_cs); continue; } } if ((thread->state & ThreadState_Suspended) != 0 || (thread->state & ThreadState_StopRequested) != 0 || (thread->state & ThreadState_Stopped) != 0) { LeaveCriticalSection (thread->synch_cs); CloseHandle (wait->handles [i]); wait->threads [i] = NULL; /* ignore this thread in next loop */ continue; } if ((thread->state & ThreadState_SuspendRequested) == 0) signal_suspend = TRUE; events [eventidx++] = thread->suspended_event; /* Convert abort requests into suspend requests */ if ((thread->state & ThreadState_AbortRequested) != 0) thread->state &= ~ThreadState_AbortRequested; thread->state |= ThreadState_SuspendRequested; LeaveCriticalSection (thread->synch_cs); /* Signal the thread to suspend */ if (signal_suspend) signal_thread_state_change (thread); } if (eventidx > 0) { WaitForMultipleObjectsEx (eventidx, events, TRUE, 100, FALSE); for (i = 0; i < wait->num; ++i) { MonoInternalThread 
*thread = wait->threads [i]; if (thread == NULL) continue; ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & ThreadState_Suspended) != 0) { CloseHandle (thread->suspended_event); thread->suspended_event = NULL; } LeaveCriticalSection (thread->synch_cs); } } else { /* * If there are threads which are starting up, we wait until they * are suspended when they try to register in the threads hash. * This is guaranteed to finish, since the threads which can create new * threads get suspended after a while. * FIXME: The finalizer thread can still create new threads. */ mono_threads_lock (); if (threads_starting_up) starting = mono_g_hash_table_size (threads_starting_up) > 0; else starting = FALSE; mono_threads_unlock (); if (starting) Sleep (100); else finished = TRUE; } g_free (events); } } static void collect_threads (gpointer key, gpointer value, gpointer user_data) { MonoInternalThread *thread = (MonoInternalThread*)value; struct wait_data *wait = (struct wait_data*)user_data; HANDLE handle; if (wait->num<MAXIMUM_WAIT_OBJECTS) { handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid); if (handle == NULL) return; wait->handles [wait->num] = handle; wait->threads [wait->num] = thread; wait->num++; } } /** * mono_threads_request_thread_dump: * * Ask all threads except the current to print their stacktrace to stdout. */ void mono_threads_request_thread_dump (void) { struct wait_data wait_data; struct wait_data *wait = &wait_data; int i; memset (wait, 0, sizeof (struct wait_data)); /* * Make a copy of the hashtable since we can't do anything with * threads while threads_mutex is held. */ mono_threads_lock (); mono_g_hash_table_foreach (threads, collect_threads, wait); mono_threads_unlock (); for (i = 0; i < wait->num; ++i) { MonoInternalThread *thread = wait->threads [i]; if (!mono_gc_is_finalizer_internal_thread (thread) && (thread != mono_thread_internal_current ()) && !thread->thread_dump_requested) { thread->thread_dump_requested = TRUE; signal_thread_state_change (thread); } CloseHandle (wait->handles [i]); } } struct ref_stack { gpointer *refs; gint allocated; /* +1 so that refs [allocated] == NULL */ gint bottom; }; typedef struct ref_stack RefStack; static RefStack * ref_stack_new (gint initial_size) { RefStack *rs; initial_size = MAX (initial_size, 16) + 1; rs = g_new0 (RefStack, 1); rs->refs = g_new0 (gpointer, initial_size); rs->allocated = initial_size; return rs; } static void ref_stack_destroy (gpointer ptr) { RefStack *rs = ptr; if (rs != NULL) { g_free (rs->refs); g_free (rs); } } static void ref_stack_push (RefStack *rs, gpointer ptr) { g_assert (rs != NULL); if (rs->bottom >= rs->allocated) { /* Grow geometrically, keeping room for the NULL terminator at refs [allocated] */ rs->refs = g_realloc (rs->refs, (rs->allocated * 2 + 1) * sizeof (gpointer)); rs->allocated <<= 1; rs->refs [rs->allocated] = NULL; } rs->refs [rs->bottom++] = ptr; } static void ref_stack_pop (RefStack *rs) { if (rs == NULL || rs->bottom == 0) return; rs->bottom--; rs->refs [rs->bottom] = NULL; } static gboolean ref_stack_find (RefStack *rs, gpointer ptr) { gpointer *refs; if (rs == NULL) return FALSE; for (refs = rs->refs; refs && *refs; refs++) { if (*refs == ptr) return TRUE; } return FALSE; } /* * mono_thread_push_appdomain_ref: * * Register that the current thread may have references to objects in domain * @domain on its stack. Each call to this function should be paired with a * call to pop_appdomain_ref.
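 *
 * Illustrative pairing in a hypothetical caller:
 *
 *   mono_thread_push_appdomain_ref (domain);
 *   ... run code that may leave references to objects of @domain on the stack ...
 *   mono_thread_pop_appdomain_ref ();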
*/ void mono_thread_push_appdomain_ref (MonoDomain *domain) { MonoInternalThread *thread = mono_thread_internal_current (); if (thread) { /* printf ("PUSH REF: %"G_GSIZE_FORMAT" -> %s.\n", (gsize)thread->tid, domain->friendly_name); */ SPIN_LOCK (thread->lock_thread_id); if (thread->appdomain_refs == NULL) thread->appdomain_refs = ref_stack_new (16); ref_stack_push (thread->appdomain_refs, domain); SPIN_UNLOCK (thread->lock_thread_id); } } void mono_thread_pop_appdomain_ref (void) { MonoInternalThread *thread = mono_thread_internal_current (); if (thread) { /* printf ("POP REF: %"G_GSIZE_FORMAT" -> %s.\n", (gsize)thread->tid, ((MonoDomain*)(thread->appdomain_refs->data))->friendly_name); */ SPIN_LOCK (thread->lock_thread_id); ref_stack_pop (thread->appdomain_refs); SPIN_UNLOCK (thread->lock_thread_id); } } gboolean mono_thread_internal_has_appdomain_ref (MonoInternalThread *thread, MonoDomain *domain) { gboolean res; SPIN_LOCK (thread->lock_thread_id); res = ref_stack_find (thread->appdomain_refs, domain); SPIN_UNLOCK (thread->lock_thread_id); return res; } gboolean mono_thread_has_appdomain_ref (MonoThread *thread, MonoDomain *domain) { return mono_thread_internal_has_appdomain_ref (thread->internal_thread, domain); } typedef struct abort_appdomain_data { struct wait_data wait; MonoDomain *domain; } abort_appdomain_data; static void collect_appdomain_thread (gpointer key, gpointer value, gpointer user_data) { MonoInternalThread *thread = (MonoInternalThread*)value; abort_appdomain_data *data = (abort_appdomain_data*)user_data; MonoDomain *domain = data->domain; if (mono_thread_internal_has_appdomain_ref (thread, domain)) { /* printf ("ABORTING THREAD %p BECAUSE IT REFERENCES DOMAIN %s.\n", thread->tid, domain->friendly_name); */ if(data->wait.num<MAXIMUM_WAIT_OBJECTS) { HANDLE handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid); if (handle == NULL) return; data->wait.handles [data->wait.num] = handle; data->wait.threads [data->wait.num] = thread; data->wait.num++; } else { /* Just ignore the rest, we can't do anything with * them yet */ } } } /* * mono_threads_abort_appdomain_threads: * * Abort threads which has references to the given appdomain. */ gboolean mono_threads_abort_appdomain_threads (MonoDomain *domain, int timeout) { abort_appdomain_data user_data; guint32 start_time; int orig_timeout = timeout; int i; THREAD_DEBUG (g_message ("%s: starting abort", __func__)); start_time = mono_msec_ticks (); do { mono_threads_lock (); user_data.domain = domain; user_data.wait.num = 0; /* This shouldn't take any locks */ mono_g_hash_table_foreach (threads, collect_appdomain_thread, &user_data); mono_threads_unlock (); if (user_data.wait.num > 0) { /* Abort the threads outside the threads lock */ for (i = 0; i < user_data.wait.num; ++i) ves_icall_System_Threading_Thread_Abort (user_data.wait.threads [i], NULL); /* * We should wait for the threads either to abort, or to leave the * domain. We can't do the latter, so we wait with a timeout. */ wait_for_tids (&user_data.wait, 100); } /* Update remaining time */ timeout -= mono_msec_ticks () - start_time; start_time = mono_msec_ticks (); if (orig_timeout != -1 && timeout < 0) return FALSE; } while (user_data.wait.num > 0); THREAD_DEBUG (g_message ("%s: abort done", __func__)); return TRUE; } static void clear_cached_culture (gpointer key, gpointer value, gpointer user_data) { MonoInternalThread *thread = (MonoInternalThread*)value; MonoDomain *domain = (MonoDomain*)user_data; int i; /* No locking needed here */ /* FIXME: why no locking? 
writes to the cache are protected with synch_cs above */ if (thread->cached_culture_info) { for (i = 0; i < NUM_CACHED_CULTURES * 2; ++i) { MonoObject *obj = mono_array_get (thread->cached_culture_info, MonoObject*, i); if (obj && obj->vtable->domain == domain) mono_array_set (thread->cached_culture_info, MonoObject*, i, NULL); } } } /* * mono_threads_clear_cached_culture: * * Clear the cached_current_culture from all threads if it is in the * given appdomain. */ void mono_threads_clear_cached_culture (MonoDomain *domain) { mono_threads_lock (); mono_g_hash_table_foreach (threads, clear_cached_culture, domain); mono_threads_unlock (); } /* * mono_thread_get_undeniable_exception: * * Return an exception which needs to be raised when leaving a catch clause. * This is used for undeniable exception propagation. */ MonoException* mono_thread_get_undeniable_exception (void) { MonoInternalThread *thread = mono_thread_internal_current (); if (thread && thread->abort_exc && !is_running_protected_wrapper ()) { /* * FIXME: Clear the abort exception and return an AppDomainUnloaded * exception if the thread no longer references a dying appdomain. */ thread->abort_exc->trace_ips = NULL; thread->abort_exc->stack_trace = NULL; return thread->abort_exc; } return NULL; } #if MONO_SMALL_CONFIG #define NUM_STATIC_DATA_IDX 4 static const int static_data_size [NUM_STATIC_DATA_IDX] = { 64, 256, 1024, 4096 }; #else #define NUM_STATIC_DATA_IDX 8 static const int static_data_size [NUM_STATIC_DATA_IDX] = { 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216 }; #endif static uintptr_t* static_reference_bitmaps [NUM_STATIC_DATA_IDX]; #ifdef HAVE_SGEN_GC static void mark_tls_slots (void *addr, MonoGCMarkFunc mark_func) { int i; gpointer *static_data = addr; for (i = 0; i < NUM_STATIC_DATA_IDX; ++i) { int j, numwords; void **ptr; if (!static_data [i]) continue; numwords = 1 + static_data_size [i] / sizeof (gpointer) / (sizeof(uintptr_t) * 8); ptr = static_data [i]; for (j = 0; j < numwords; ++j, ptr += sizeof (uintptr_t) * 8) { uintptr_t bmap = static_reference_bitmaps [i][j]; void ** p = ptr; while (bmap) { if ((bmap & 1) && *p) { mark_func (p); } p++; bmap >>= 1; } } } } #endif /* * mono_alloc_static_data * * Allocate memory blocks for storing threads or context static data */ static void mono_alloc_static_data (gpointer **static_data_ptr, guint32 offset, gboolean threadlocal) { guint idx = (offset >> 24) - 1; int i; gpointer* static_data = *static_data_ptr; if (!static_data) { static void* tls_desc = NULL; #ifdef HAVE_SGEN_GC if (!tls_desc) tls_desc = mono_gc_make_root_descr_user (mark_tls_slots); #endif static_data = mono_gc_alloc_fixed (static_data_size [0], threadlocal?tls_desc:NULL); *static_data_ptr = static_data; static_data [0] = static_data; } for (i = 1; i <= idx; ++i) { if (static_data [i]) continue; #ifdef HAVE_SGEN_GC static_data [i] = threadlocal?g_malloc0 (static_data_size [i]):mono_gc_alloc_fixed (static_data_size [i], NULL); #else static_data [i] = mono_gc_alloc_fixed (static_data_size [i], NULL); #endif } } static void mono_free_static_data (gpointer* static_data, gboolean threadlocal) { int i; for (i = 1; i < NUM_STATIC_DATA_IDX; ++i) { if (!static_data [i]) continue; #ifdef HAVE_SGEN_GC if (threadlocal) g_free (static_data [i]); else mono_gc_free_fixed (static_data [i]); #else mono_gc_free_fixed (static_data [i]); #endif } mono_gc_free_fixed (static_data); } /* * mono_init_static_data_info * * Initializes static data counters */ static void mono_init_static_data_info (StaticDataInfo 
*static_data) { static_data->idx = 0; static_data->offset = 0; static_data->freelist = NULL; } /* * mono_alloc_static_data_slot * * Generates an offset for static data. static_data contains the counters * used to generate it. */ static guint32 mono_alloc_static_data_slot (StaticDataInfo *static_data, guint32 size, guint32 align) { guint32 offset; if (!static_data->idx && !static_data->offset) { /* * we use the first chunk of the first allocation also as * an array for the rest of the data */ static_data->offset = sizeof (gpointer) * NUM_STATIC_DATA_IDX; } static_data->offset += align - 1; static_data->offset &= ~(align - 1); if (static_data->offset + size >= static_data_size [static_data->idx]) { static_data->idx ++; g_assert (size <= static_data_size [static_data->idx]); g_assert (static_data->idx < NUM_STATIC_DATA_IDX); static_data->offset = 0; } offset = static_data->offset | ((static_data->idx + 1) << 24); static_data->offset += size; return offset; } /* * ensure thread static fields already allocated are valid for thread * This function is called when a thread is created or on thread attach. */ static void thread_adjust_static_data (MonoInternalThread *thread) { guint32 offset; mono_threads_lock (); if (thread_static_info.offset || thread_static_info.idx > 0) { /* get the current allocated size */ offset = thread_static_info.offset | ((thread_static_info.idx + 1) << 24); mono_alloc_static_data (&(thread->static_data), offset, TRUE); } mono_threads_unlock (); } static void alloc_thread_static_data_helper (gpointer key, gpointer value, gpointer user) { MonoInternalThread *thread = value; guint32 offset = GPOINTER_TO_UINT (user); mono_alloc_static_data (&(thread->static_data), offset, TRUE); } static MonoThreadDomainTls* search_tls_slot_in_freelist (StaticDataInfo *static_data, guint32 size, guint32 align) { MonoThreadDomainTls* prev = NULL; MonoThreadDomainTls* tmp = static_data->freelist; while (tmp) { if (tmp->size == size) { if (prev) prev->next = tmp->next; else static_data->freelist = tmp->next; return tmp; } tmp = tmp->next; } return NULL; } static void update_tls_reference_bitmap (guint32 offset, uintptr_t *bitmap, int max_set) { int i; int idx = (offset >> 24) - 1; uintptr_t *rb; if (!static_reference_bitmaps [idx]) static_reference_bitmaps [idx] = g_new0 (uintptr_t, 1 + static_data_size [idx] / sizeof(gpointer) / (sizeof(uintptr_t) * 8)); rb = static_reference_bitmaps [idx]; offset &= 0xffffff; offset /= sizeof (gpointer); /* offset is now the bitmap offset */ for (i = 0; i < max_set; ++i) { if (bitmap [i / sizeof (uintptr_t)] & (1L << (i & (sizeof (uintptr_t) * 8 -1)))) rb [(offset + i) / (sizeof (uintptr_t) * 8)] |= (1L << ((offset + i) & (sizeof (uintptr_t) * 8 -1))); } } static void clear_reference_bitmap (guint32 offset, guint32 size) { int idx = (offset >> 24) - 1; uintptr_t *rb; rb = static_reference_bitmaps [idx]; offset &= 0xffffff; offset /= sizeof (gpointer); size /= sizeof (gpointer); size += offset; /* offset is now the bitmap offset */ for (; offset < size; ++offset) rb [offset / (sizeof (uintptr_t) * 8)] &= ~(1L << (offset & (sizeof (uintptr_t) * 8 -1))); } /* * The offset for a special static variable is composed of three parts: * a bit that indicates the type of static data (0:thread, 1:context), * an index in the array of chunks of memory for the thread (thread->static_data) * and an offset in that chunk of mem. This allows allocating less memory in the * common case. 
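 *
 * Worked example (illustrative values): a thread-static offset of 0x02000040
 * has the context bit (0x80000000) clear, idx = (offset >> 24) - 1 = 1, and the
 * low 24 bits (0x40) are the byte offset inside thread->static_data [1];
 * context offsets carry the same layout with the high bit set.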
*/ guint32 mono_alloc_special_static_data (guint32 static_type, guint32 size, guint32 align, uintptr_t *bitmap, int max_set) { guint32 offset; if (static_type == SPECIAL_STATIC_THREAD) { MonoThreadDomainTls *item; mono_threads_lock (); item = search_tls_slot_in_freelist (&thread_static_info, size, align); /*g_print ("TLS alloc: %d in domain %p (total: %d), cached: %p\n", size, mono_domain_get (), thread_static_info.offset, item);*/ if (item) { offset = item->offset; g_free (item); } else { offset = mono_alloc_static_data_slot (&thread_static_info, size, align); } update_tls_reference_bitmap (offset, bitmap, max_set); /* This can be called during startup */ if (threads != NULL) mono_g_hash_table_foreach (threads, alloc_thread_static_data_helper, GUINT_TO_POINTER (offset)); mono_threads_unlock (); } else { g_assert (static_type == SPECIAL_STATIC_CONTEXT); mono_contexts_lock (); offset = mono_alloc_static_data_slot (&context_static_info, size, align); mono_contexts_unlock (); offset |= 0x80000000; /* Set the high bit to indicate context static data */ } return offset; } gpointer mono_get_special_static_data_for_thread (MonoInternalThread *thread, guint32 offset) { /* The high bit means either thread (0) or static (1) data. */ guint32 static_type = (offset & 0x80000000); int idx; offset &= 0x7fffffff; idx = (offset >> 24) - 1; if (static_type == 0) { return get_thread_static_data (thread, offset); } else { /* Allocate static data block under demand, since we don't have a list // of contexts */ MonoAppContext *context = mono_context_get (); if (!context->static_data || !context->static_data [idx]) { mono_contexts_lock (); mono_alloc_static_data (&(context->static_data), offset, FALSE); mono_contexts_unlock (); } return ((char*) context->static_data [idx]) + (offset & 0xffffff); } } gpointer mono_get_special_static_data (guint32 offset) { return mono_get_special_static_data_for_thread (mono_thread_internal_current (), offset); } typedef struct { guint32 offset; guint32 size; } TlsOffsetSize; static void free_thread_static_data_helper (gpointer key, gpointer value, gpointer user) { MonoInternalThread *thread = value; TlsOffsetSize *data = user; int idx = (data->offset >> 24) - 1; char *ptr; if (!thread->static_data || !thread->static_data [idx]) return; ptr = ((char*) thread->static_data [idx]) + (data->offset & 0xffffff); memset (ptr, 0, data->size); } static void do_free_special_slot (guint32 offset, guint32 size) { guint32 static_type = (offset & 0x80000000); /*g_print ("free %s , size: %d, offset: %x\n", field->name, size, offset);*/ if (static_type == 0) { TlsOffsetSize data; MonoThreadDomainTls *item = g_new0 (MonoThreadDomainTls, 1); data.offset = offset & 0x7fffffff; data.size = size; clear_reference_bitmap (data.offset, data.size); if (threads != NULL) mono_g_hash_table_foreach (threads, free_thread_static_data_helper, &data); item->offset = offset; item->size = size; if (!mono_runtime_is_shutting_down ()) { item->next = thread_static_info.freelist; thread_static_info.freelist = item; } else { /* We could be called during shutdown after mono_thread_cleanup () is called */ g_free (item); } } else { /* FIXME: free context static data as well */ } } static void do_free_special (gpointer key, gpointer value, gpointer data) { MonoClassField *field = key; guint32 offset = GPOINTER_TO_UINT (value); gint32 align; guint32 size; size = mono_type_size (field->type, &align); do_free_special_slot (offset, size); } void mono_alloc_special_static_data_free (GHashTable *special_static_fields) { 
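/* Release the slot of every registered special static field (do_free_special computes the field size and calls do_free_special_slot), holding the threads lock throughout. */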
mono_threads_lock (); g_hash_table_foreach (special_static_fields, do_free_special, NULL); mono_threads_unlock (); } void mono_special_static_data_free_slot (guint32 offset, guint32 size) { mono_threads_lock (); do_free_special_slot (offset, size); mono_threads_unlock (); } /* * allocates room in the thread local area for storing an instance of the struct type * the allocation is kept track of in domain->tlsrec_list. */ uint32_t mono_thread_alloc_tls (MonoReflectionType *type) { MonoDomain *domain = mono_domain_get (); MonoClass *klass; MonoTlsDataRecord *tlsrec; int max_set = 0; gsize *bitmap; gsize default_bitmap [4] = {0}; uint32_t tls_offset; guint32 size; gint32 align; klass = mono_class_from_mono_type (type->type); /* TlsDatum is a struct, so we subtract the object header size offset */ bitmap = mono_class_compute_bitmap (klass, default_bitmap, sizeof (default_bitmap) * 8, - (int)(sizeof (MonoObject) / sizeof (gpointer)), &max_set, FALSE); size = mono_type_size (type->type, &align); tls_offset = mono_alloc_special_static_data (SPECIAL_STATIC_THREAD, size, align, bitmap, max_set); if (bitmap != default_bitmap) g_free (bitmap); tlsrec = g_new0 (MonoTlsDataRecord, 1); tlsrec->tls_offset = tls_offset; tlsrec->size = size; mono_domain_lock (domain); tlsrec->next = domain->tlsrec_list; domain->tlsrec_list = tlsrec; mono_domain_unlock (domain); return tls_offset; } void mono_thread_destroy_tls (uint32_t tls_offset) { MonoTlsDataRecord *prev = NULL; MonoTlsDataRecord *cur; guint32 size = 0; MonoDomain *domain = mono_domain_get (); mono_domain_lock (domain); cur = domain->tlsrec_list; while (cur) { if (cur->tls_offset == tls_offset) { if (prev) prev->next = cur->next; else domain->tlsrec_list = cur->next; size = cur->size; g_free (cur); break; } prev = cur; cur = cur->next; } mono_domain_unlock (domain); if (size) mono_special_static_data_free_slot (tls_offset, size); } /* * This is just to ensure cleanup: the finalizers should have taken care, so this is not perf-critical. */ void mono_thread_destroy_domain_tls (MonoDomain *domain) { while (domain->tlsrec_list) mono_thread_destroy_tls (domain->tlsrec_list->tls_offset); } static MonoClassField *local_slots = NULL; typedef struct { /* local tls data to get locals_slot from a thread */ guint32 offset; int idx; /* index in the locals_slot array */ int slot; } LocalSlotID; static void clear_local_slot (gpointer key, gpointer value, gpointer user_data) { LocalSlotID *sid = user_data; MonoInternalThread *thread = (MonoInternalThread*)value; MonoArray *slots_array; /* * the static field is stored at: ((char*) thread->static_data [idx]) + (offset & 0xffffff); * it is for the right domain, so we need to check if it is allocated an initialized * for the current thread. 
*/ /*g_print ("handling thread %p\n", thread);*/ if (!thread->static_data || !thread->static_data [sid->idx]) return; slots_array = *(MonoArray **)(((char*) thread->static_data [sid->idx]) + (sid->offset & 0xffffff)); if (!slots_array || sid->slot >= mono_array_length (slots_array)) return; mono_array_set (slots_array, MonoObject*, sid->slot, NULL); } void mono_thread_free_local_slot_values (int slot, MonoBoolean thread_local) { MonoDomain *domain; LocalSlotID sid; sid.slot = slot; if (thread_local) { void *addr = NULL; if (!local_slots) { local_slots = mono_class_get_field_from_name (mono_defaults.thread_class, "local_slots"); if (!local_slots) { g_warning ("local_slots field not found in Thread class"); return; } } domain = mono_domain_get (); mono_domain_lock (domain); if (domain->special_static_fields) addr = g_hash_table_lookup (domain->special_static_fields, local_slots); mono_domain_unlock (domain); if (!addr) return; /*g_print ("freeing slot %d at %p\n", slot, addr);*/ sid.offset = GPOINTER_TO_UINT (addr); sid.offset &= 0x7fffffff; sid.idx = (sid.offset >> 24) - 1; mono_threads_lock (); mono_g_hash_table_foreach (threads, clear_local_slot, &sid); mono_threads_unlock (); } else { /* FIXME: clear the slot for MonoAppContexts, too */ } } #ifdef HOST_WIN32 static void CALLBACK dummy_apc (ULONG_PTR param) { } #else static guint32 dummy_apc (gpointer param) { return 0; } #endif /* * mono_thread_execute_interruption * * Performs the operation that the requested thread state requires (abort, * suspend or stop) */ static MonoException* mono_thread_execute_interruption (MonoInternalThread *thread) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); /* MonoThread::interruption_requested can only be changed with atomics */ if (InterlockedCompareExchange (&thread->interruption_requested, FALSE, TRUE)) { /* this will consume pending APC calls */ WaitForSingleObjectEx (GetCurrentThread(), 0, TRUE); InterlockedDecrement (&thread_interruption_requested); #ifndef HOST_WIN32 /* Clear the interrupted flag of the thread so it can wait again */ wapi_clear_interruption (); #endif } if ((thread->state & ThreadState_AbortRequested) != 0) { LeaveCriticalSection (thread->synch_cs); if (thread->abort_exc == NULL) { /* * This might be racy, but it has to be called outside the lock * since it calls managed code. */ MONO_OBJECT_SETREF (thread, abort_exc, mono_get_exception_thread_abort ()); } return thread->abort_exc; } else if ((thread->state & ThreadState_SuspendRequested) != 0) { thread->state &= ~ThreadState_SuspendRequested; thread->state |= ThreadState_Suspended; thread->suspend_event = CreateEvent (NULL, TRUE, FALSE, NULL); if (thread->suspend_event == NULL) { LeaveCriticalSection (thread->synch_cs); return(NULL); } if (thread->suspended_event) SetEvent (thread->suspended_event); LeaveCriticalSection (thread->synch_cs); if (shutting_down) { /* After we left the lock, the runtime might shut down so everything becomes invalid */ for (;;) Sleep (1000); } WaitForSingleObject (thread->suspend_event, INFINITE); EnterCriticalSection (thread->synch_cs); CloseHandle (thread->suspend_event); thread->suspend_event = NULL; thread->state &= ~ThreadState_Suspended; /* The thread that requested the resume will have replaced this event * and will be waiting for it */ SetEvent (thread->resume_event); LeaveCriticalSection (thread->synch_cs); return NULL; } else if ((thread->state & ThreadState_StopRequested) != 0) { /* FIXME: do this through the JIT? 
*/ LeaveCriticalSection (thread->synch_cs); mono_thread_exit (); return NULL; } else if (thread->thread_interrupt_requested) { thread->thread_interrupt_requested = FALSE; LeaveCriticalSection (thread->synch_cs); return(mono_get_exception_thread_interrupted ()); } LeaveCriticalSection (thread->synch_cs); return NULL; } /* * mono_thread_request_interruption * * A signal handler can call this method to request the interruption of a * thread. The result of the interruption will depend on the current state of * the thread. If the result is an exception that needs to be thrown, it is * provided as return value. */ MonoException* mono_thread_request_interruption (gboolean running_managed) { MonoInternalThread *thread = mono_thread_internal_current (); /* The thread may already be stopping */ if (thread == NULL) return NULL; #ifdef HOST_WIN32 if (thread->interrupt_on_stop && thread->state & ThreadState_StopRequested && thread->state & ThreadState_Background) ExitThread (1); #endif if (InterlockedCompareExchange (&thread->interruption_requested, 1, 0) == 1) return NULL; if (!running_managed || is_running_protected_wrapper ()) { /* Can't stop while in unmanaged code. Increase the global interruption request count. When exiting the unmanaged method the count will be checked and the thread will be interrupted. */ InterlockedIncrement (&thread_interruption_requested); if (mono_thread_notify_pending_exc_fn && !running_managed) /* The JIT will notify the thread about the interruption */ /* This shouldn't take any locks */ mono_thread_notify_pending_exc_fn (); /* this will wake the thread if it is in WaitForSingleObject or similar */ /* Our implementation of this function ignores the func argument */ QueueUserAPC ((PAPCFUNC)dummy_apc, thread->handle, NULL); return NULL; } else { return mono_thread_execute_interruption (thread); } } /*This function should be called by a thread after it has exited all of * its handle blocks at interruption time.*/ MonoException* mono_thread_resume_interruption (void) { MonoInternalThread *thread = mono_thread_internal_current (); gboolean still_aborting; /* The thread may already be stopping */ if (thread == NULL) return NULL; ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); still_aborting = (thread->state & ThreadState_AbortRequested) != 0; LeaveCriticalSection (thread->synch_cs); /*This can happen if the protected block called Thread::ResetAbort*/ if (!still_aborting) return NULL; if (InterlockedCompareExchange (&thread->interruption_requested, 1, 0) == 1) return NULL; InterlockedIncrement (&thread_interruption_requested); #ifndef HOST_WIN32 wapi_self_interrupt (); #endif return mono_thread_execute_interruption (thread); } gboolean mono_thread_interruption_requested () { if (thread_interruption_requested) { MonoInternalThread *thread = mono_thread_internal_current (); /* The thread may already be stopping */ if (thread != NULL) return (thread->interruption_requested); } return FALSE; } static void mono_thread_interruption_checkpoint_request (gboolean bypass_abort_protection) { MonoInternalThread *thread = mono_thread_internal_current (); /* The thread may already be stopping */ if (thread == NULL) return; mono_debugger_check_interruption (); if (thread->interruption_requested && (bypass_abort_protection || !is_running_protected_wrapper ())) { MonoException* exc = mono_thread_execute_interruption (thread); if (exc) mono_raise_exception (exc); } } /* * Performs the interruption of the current thread, if one has been requested, * and the thread is not 
running a protected wrapper. */ void mono_thread_interruption_checkpoint () { mono_thread_interruption_checkpoint_request (FALSE); } /* * Performs the interruption of the current thread, if one has been requested. */ void mono_thread_force_interruption_checkpoint () { mono_thread_interruption_checkpoint_request (TRUE); } /* * mono_thread_get_and_clear_pending_exception: * * Return any pending exceptions for the current thread and clear it as a side effect. */ MonoException* mono_thread_get_and_clear_pending_exception (void) { MonoInternalThread *thread = mono_thread_internal_current (); /* The thread may already be stopping */ if (thread == NULL) return NULL; if (thread->interruption_requested && !is_running_protected_wrapper ()) { return mono_thread_execute_interruption (thread); } if (thread->pending_exception) { MonoException *exc = thread->pending_exception; thread->pending_exception = NULL; return exc; } return NULL; } /* * mono_set_pending_exception: * * Set the pending exception of the current thread to EXC. On platforms which * support it, the exception will be thrown when execution returns to managed code. * On other platforms, this function is equivalent to mono_raise_exception (). * Internal calls which report exceptions using this function instead of * raise_exception () might be called by JITted code using a more efficient calling * convention. */ void mono_set_pending_exception (MonoException *exc) { MonoInternalThread *thread = mono_thread_internal_current (); /* The thread may already be stopping */ if (thread == NULL) return; if (mono_thread_notify_pending_exc_fn) { MONO_OBJECT_SETREF (thread, pending_exception, exc); mono_thread_notify_pending_exc_fn (); } else { /* No way to notify the JIT about the exception, have to throw it now */ mono_raise_exception (exc); } } /** * mono_thread_interruption_request_flag: * * Returns the address of a flag that will be non-zero if an interruption has * been requested for a thread. The thread to interrupt may not be the current * thread, so an additional call to mono_thread_interruption_requested() or * mono_thread_interruption_checkpoint() is allways needed if the flag is not * zero. */ gint32* mono_thread_interruption_request_flag () { return &thread_interruption_requested; } void mono_thread_init_apartment_state (void) { #ifdef HOST_WIN32 MonoInternalThread* thread = mono_thread_internal_current (); /* Positive return value indicates success, either * S_OK if this is first CoInitialize call, or * S_FALSE if CoInitialize already called, but with same * threading model. A negative value indicates failure, * probably due to trying to change the threading model. */ if (CoInitializeEx(NULL, (thread->apartment_state == ThreadApartmentState_STA) ? 
COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED) < 0) { thread->apartment_state = ThreadApartmentState_Unknown; } #endif } void mono_thread_cleanup_apartment_state (void) { #ifdef HOST_WIN32 MonoInternalThread* thread = mono_thread_internal_current (); if (thread && thread->apartment_state != ThreadApartmentState_Unknown) { CoUninitialize (); } #endif } void mono_thread_set_state (MonoInternalThread *thread, MonoThreadState state) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); thread->state |= state; LeaveCriticalSection (thread->synch_cs); } void mono_thread_clr_state (MonoInternalThread *thread, MonoThreadState state) { ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); thread->state &= ~state; LeaveCriticalSection (thread->synch_cs); } gboolean mono_thread_test_state (MonoInternalThread *thread, MonoThreadState test) { gboolean ret = FALSE; ensure_synch_cs_set (thread); EnterCriticalSection (thread->synch_cs); if ((thread->state & test) != 0) { ret = TRUE; } LeaveCriticalSection (thread->synch_cs); return ret; } static MonoClassField *execution_context_field; static MonoObject** get_execution_context_addr (void) { MonoDomain *domain = mono_domain_get (); guint32 offset; if (!execution_context_field) { execution_context_field = mono_class_get_field_from_name (mono_defaults.thread_class, "_ec"); g_assert (execution_context_field); } g_assert (mono_class_try_get_vtable (domain, mono_defaults.appdomain_class)); mono_domain_lock (domain); offset = GPOINTER_TO_UINT (g_hash_table_lookup (domain->special_static_fields, execution_context_field)); mono_domain_unlock (domain); g_assert (offset); return (MonoObject**) mono_get_special_static_data (offset); } MonoObject* mono_thread_get_execution_context (void) { return *get_execution_context_addr (); } void mono_thread_set_execution_context (MonoObject *ec) { *get_execution_context_addr () = ec; } static gboolean has_tls_get = FALSE; void mono_runtime_set_has_tls_get (gboolean val) { has_tls_get = val; } gboolean mono_runtime_has_tls_get (void) { return has_tls_get; } int mono_thread_kill (MonoInternalThread *thread, int signal) { #ifdef HOST_WIN32 /* Win32 uses QueueUserAPC and callers of this are guarded */ g_assert_not_reached (); #else # ifdef PTHREAD_POINTER_ID return pthread_kill ((gpointer)(gsize)(thread->tid), mono_thread_get_abort_signal ()); # else # ifdef PLATFORM_ANDROID if (thread->android_tid != 0) { int ret; int old_errno = errno; ret = tkill ((pid_t) thread->android_tid, signal); if (ret < 0) { ret = errno; errno = old_errno; } return ret; } else return pthread_kill (thread->tid, mono_thread_get_abort_signal ()); # else return pthread_kill (thread->tid, mono_thread_get_abort_signal ()); # endif # endif #endif }
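/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original source. It restates, in isolation, how the special static offsets
 * produced by mono_alloc_static_data_slot () / mono_alloc_special_static_data ()
 * above are decoded by the accessors. The helper name is hypothetical and the
 * block is disabled so it does not affect compilation.
 */
#if 0
static gpointer
example_decode_special_static_offset (gpointer *static_data, guint32 offset)
{
	int idx;

	/* Bit 31 distinguishes thread (0) from context (1) static data. */
	offset &= 0x7fffffff;
	/* Bits 24..30 hold the chunk index, stored biased by one so that a
	 * zero offset is never a valid slot. */
	idx = (offset >> 24) - 1;
	/* The low 24 bits are the byte offset inside that chunk. */
	return ((char *) static_data [idx]) + (offset & 0xffffff);
}
#endif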
./CrossVul/dataset_final_sorted/CWE-399/c/good_3434_0
crossvul-cpp_data_good_3433_1
/* * metadata/gc.c: GC icalls. * * Author: Paolo Molaro <lupus@ximian.com> * * Copyright 2002-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) */ #include <config.h> #include <glib.h> #include <string.h> #include <errno.h> #include <mono/metadata/gc-internal.h> #include <mono/metadata/mono-gc.h> #include <mono/metadata/threads.h> #include <mono/metadata/tabledefs.h> #include <mono/metadata/exception.h> #include <mono/metadata/profiler-private.h> #include <mono/metadata/domain-internals.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/metadata-internals.h> #include <mono/metadata/mono-mlist.h> #include <mono/metadata/threadpool.h> #include <mono/metadata/threads-types.h> #include <mono/utils/mono-logger-internal.h> #include <mono/metadata/gc-internal.h> #include <mono/metadata/marshal.h> /* for mono_delegate_free_ftnptr () */ #include <mono/metadata/attach.h> #include <mono/metadata/console-io.h> #include <mono/utils/mono-semaphore.h> #ifndef HOST_WIN32 #include <pthread.h> #endif typedef struct DomainFinalizationReq { MonoDomain *domain; HANDLE done_event; } DomainFinalizationReq; #ifdef PLATFORM_WINCE /* FIXME: add accessors to gc.dll API */ extern void (*__imp_GC_finalizer_notifier)(void); #define GC_finalizer_notifier __imp_GC_finalizer_notifier extern int __imp_GC_finalize_on_demand; #define GC_finalize_on_demand __imp_GC_finalize_on_demand #endif static gboolean gc_disabled = FALSE; static gboolean finalizing_root_domain = FALSE; #define mono_finalizer_lock() EnterCriticalSection (&finalizer_mutex) #define mono_finalizer_unlock() LeaveCriticalSection (&finalizer_mutex) static CRITICAL_SECTION finalizer_mutex; static CRITICAL_SECTION reference_queue_mutex; static GSList *domains_to_finalize= NULL; static MonoMList *threads_to_finalize = NULL; static MonoInternalThread *gc_thread; static void object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*)); static void mono_gchandle_set_target (guint32 gchandle, MonoObject *obj); static void reference_queue_proccess_all (void); #ifndef HAVE_NULL_GC static HANDLE pending_done_event; static HANDLE shutdown_event; #endif static void add_thread_to_finalize (MonoInternalThread *thread) { mono_finalizer_lock (); if (!threads_to_finalize) MONO_GC_REGISTER_ROOT_SINGLE (threads_to_finalize); threads_to_finalize = mono_mlist_append (threads_to_finalize, (MonoObject*)thread); mono_finalizer_unlock (); } static gboolean suspend_finalizers = FALSE; /* * actually, we might want to queue the finalize requests in a separate thread, * but we need to be careful about the execution domain of the thread... */ void mono_gc_run_finalize (void *obj, void *data) { MonoObject *exc = NULL; MonoObject *o; #ifndef HAVE_SGEN_GC MonoObject *o2; #endif MonoMethod* finalizer = NULL; MonoDomain *caller_domain = mono_domain_get (); MonoDomain *domain; RuntimeInvokeFunction runtime_invoke; GSList *l, *refs = NULL; o = (MonoObject*)((char*)obj + GPOINTER_TO_UINT (data)); if (suspend_finalizers) return; domain = o->vtable->domain; #ifndef HAVE_SGEN_GC mono_domain_finalizers_lock (domain); o2 = g_hash_table_lookup (domain->finalizable_objects_hash, o); refs = mono_gc_remove_weak_track_object (domain, o); mono_domain_finalizers_unlock (domain); if (!o2) /* Already finalized somehow */ return; #endif if (refs) { /* * Support for GCHandles of type WeakTrackResurrection: * * Its not exactly clear how these are supposed to work, or how their * semantics can be implemented. 
We only implement one crucial thing: * these handles are only cleared after the finalizer has ran. */ for (l = refs; l; l = l->next) { guint32 gchandle = GPOINTER_TO_UINT (l->data); mono_gchandle_set_target (gchandle, o); } g_slist_free (refs); } /* make sure the finalizer is not called again if the object is resurrected */ object_register_finalizer (obj, NULL); if (o->vtable->klass == mono_defaults.internal_thread_class) { MonoInternalThread *t = (MonoInternalThread*)o; if (mono_gc_is_finalizer_internal_thread (t)) /* Avoid finalizing ourselves */ return; if (t->threadpool_thread && finalizing_root_domain) { /* Don't finalize threadpool threads when shutting down - they're finalized when the threadpool shuts down. */ add_thread_to_finalize (t); return; } } if (o->vtable->klass->image == mono_defaults.corlib && !strcmp (o->vtable->klass->name, "DynamicMethod") && finalizing_root_domain) { /* * These can't be finalized during unloading/shutdown, since that would * free the native code which can still be referenced by other * finalizers. * FIXME: This is not perfect, objects dying at the same time as * dynamic methods can still reference them even when !shutdown. */ return; } if (mono_runtime_get_no_exec ()) return; /* speedup later... and use a timeout */ /* g_print ("Finalize run on %p %s.%s\n", o, mono_object_class (o)->name_space, mono_object_class (o)->name); */ /* Use _internal here, since this thread can enter a doomed appdomain */ mono_domain_set_internal (mono_object_domain (o)); /* delegates that have a native function pointer allocated are * registered for finalization, but they don't have a Finalize * method, because in most cases it's not needed and it's just a waste. */ if (o->vtable->klass->delegate) { MonoDelegate* del = (MonoDelegate*)o; if (del->delegate_trampoline) mono_delegate_free_ftnptr ((MonoDelegate*)o); mono_domain_set_internal (caller_domain); return; } finalizer = mono_class_get_finalizer (o->vtable->klass); #ifndef DISABLE_COM /* If object has a CCW but has no finalizer, it was only * registered for finalization in order to free the CCW. * Else it needs the regular finalizer run. * FIXME: what to do about ressurection and suppression * of finalizer on object with CCW. */ if (mono_marshal_free_ccw (o) && !finalizer) { mono_domain_set_internal (caller_domain); return; } #endif /* * To avoid the locking plus the other overhead of mono_runtime_invoke (), * create and precompile a wrapper which calls the finalize method using * a CALLVIRT. */ if (!domain->finalize_runtime_invoke) { MonoMethod *invoke = mono_marshal_get_runtime_invoke (mono_class_get_method_from_name_flags (mono_defaults.object_class, "Finalize", 0, 0), TRUE); domain->finalize_runtime_invoke = mono_compile_method (invoke); } runtime_invoke = domain->finalize_runtime_invoke; mono_runtime_class_init (o->vtable); runtime_invoke (o, NULL, &exc, NULL); if (exc) { /* fixme: do something useful */ } mono_domain_set_internal (caller_domain); } void mono_gc_finalize_threadpool_threads (void) { while (threads_to_finalize) { MonoInternalThread *thread = (MonoInternalThread*) mono_mlist_get_data (threads_to_finalize); /* Force finalization of the thread. 
*/ thread->threadpool_thread = FALSE; mono_object_register_finalizer ((MonoObject*)thread); mono_gc_run_finalize (thread, NULL); threads_to_finalize = mono_mlist_next (threads_to_finalize); } } gpointer mono_gc_out_of_memory (size_t size) { /* * we could allocate at program startup some memory that we could release * back to the system at this point if we're really low on memory (ie, size is * lower than the memory we set apart) */ mono_raise_exception (mono_domain_get ()->out_of_memory_ex); return NULL; } /* * Some of our objects may point to a different address than the address returned by GC_malloc() * (because of the GetHashCode hack), but we need to pass the real address to register_finalizer. * This also means that in the callback we need to adjust the pointer to get back the real * MonoObject*. * We also need to be consistent in the use of the GC_debug* variants of malloc and register_finalizer, * since that, too, can cause the underlying pointer to be offset. */ static void object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*)) { #if HAVE_BOEHM_GC guint offset = 0; MonoDomain *domain; if (obj == NULL) mono_raise_exception (mono_get_exception_argument_null ("obj")); domain = obj->vtable->domain; #ifndef GC_DEBUG /* This assertion is not valid when GC_DEBUG is defined */ g_assert (GC_base (obj) == (char*)obj - offset); #endif if (mono_domain_is_unloading (domain) && (callback != NULL)) /* * Can't register finalizers in a dying appdomain, since they * could be invoked after the appdomain has been unloaded. */ return; mono_domain_finalizers_lock (domain); if (callback) g_hash_table_insert (domain->finalizable_objects_hash, obj, obj); else g_hash_table_remove (domain->finalizable_objects_hash, obj); mono_domain_finalizers_unlock (domain); GC_REGISTER_FINALIZER_NO_ORDER ((char*)obj - offset, callback, GUINT_TO_POINTER (offset), NULL, NULL); #elif defined(HAVE_SGEN_GC) if (obj == NULL) mono_raise_exception (mono_get_exception_argument_null ("obj")); /* * If we register finalizers for domains that are unloading we might * end up running them while or after the domain is being cleared, so * the objects will not be valid anymore. */ if (!mono_domain_is_unloading (obj->vtable->domain)) mono_gc_register_for_finalization (obj, callback); #endif } /** * mono_object_register_finalizer: * @obj: object to register * * Records that object @obj has a finalizer, this will call the * Finalize method when the garbage collector disposes the object. * */ void mono_object_register_finalizer (MonoObject *obj) { /* g_print ("Registered finalizer on %p %s.%s\n", obj, mono_object_class (obj)->name_space, mono_object_class (obj)->name); */ object_register_finalizer (obj, mono_gc_run_finalize); } /** * mono_domain_finalize: * @domain: the domain to finalize * @timeout: msects to wait for the finalization to complete, -1 to wait indefinitely * * Request finalization of all finalizable objects inside @domain. Wait * @timeout msecs for the finalization to complete. 
* * Returns: TRUE if succeeded, FALSE if there was a timeout */ gboolean mono_domain_finalize (MonoDomain *domain, guint32 timeout) { DomainFinalizationReq *req; guint32 res; HANDLE done_event; if (mono_thread_internal_current () == gc_thread) /* We are called from inside a finalizer, not much we can do here */ return FALSE; /* * No need to create another thread 'cause the finalizer thread * is still working and will take care of running the finalizers */ #ifndef HAVE_NULL_GC if (gc_disabled) return TRUE; mono_gc_collect (mono_gc_max_generation ()); done_event = CreateEvent (NULL, TRUE, FALSE, NULL); if (done_event == NULL) { return FALSE; } req = g_new0 (DomainFinalizationReq, 1); req->domain = domain; req->done_event = done_event; if (domain == mono_get_root_domain ()) finalizing_root_domain = TRUE; mono_finalizer_lock (); domains_to_finalize = g_slist_append (domains_to_finalize, req); mono_finalizer_unlock (); /* Tell the finalizer thread to finalize this appdomain */ mono_gc_finalize_notify (); if (timeout == -1) timeout = INFINITE; res = WaitForSingleObjectEx (done_event, timeout, TRUE); /* printf ("WAIT RES: %d.\n", res); */ if (res == WAIT_TIMEOUT) { /* We leak the handle here */ return FALSE; } CloseHandle (done_event); if (domain == mono_get_root_domain ()) { mono_thread_pool_cleanup (); mono_gc_finalize_threadpool_threads (); } return TRUE; #else /* We don't support domain finalization without a GC */ return FALSE; #endif } void ves_icall_System_GC_InternalCollect (int generation) { mono_gc_collect (generation); } gint64 ves_icall_System_GC_GetTotalMemory (MonoBoolean forceCollection) { MONO_ARCH_SAVE_REGS; if (forceCollection) mono_gc_collect (mono_gc_max_generation ()); return mono_gc_get_used_size (); } void ves_icall_System_GC_KeepAlive (MonoObject *obj) { MONO_ARCH_SAVE_REGS; /* * Does nothing. */ } void ves_icall_System_GC_ReRegisterForFinalize (MonoObject *obj) { if (!obj) mono_raise_exception (mono_get_exception_argument_null ("obj")); object_register_finalizer (obj, mono_gc_run_finalize); } void ves_icall_System_GC_SuppressFinalize (MonoObject *obj) { if (!obj) mono_raise_exception (mono_get_exception_argument_null ("obj")); /* delegates have no finalizers, but we register them to deal with the * unmanaged->managed trampoline. We don't let the user suppress it * otherwise we'd leak it. */ if (obj->vtable->klass->delegate) return; /* FIXME: Need to handle case where obj has COM Callable Wrapper * generated for it that needs cleaned up, but user wants to suppress * their derived object finalizer. 
*/ object_register_finalizer (obj, NULL); } void ves_icall_System_GC_WaitForPendingFinalizers (void) { #ifndef HAVE_NULL_GC if (!mono_gc_pending_finalizers ()) return; if (mono_thread_internal_current () == gc_thread) /* Avoid deadlocks */ return; ResetEvent (pending_done_event); mono_gc_finalize_notify (); /* g_print ("Waiting for pending finalizers....\n"); */ WaitForSingleObjectEx (pending_done_event, INFINITE, TRUE); /* g_print ("Done pending....\n"); */ #endif } void ves_icall_System_GC_register_ephemeron_array (MonoObject *array) { #ifdef HAVE_SGEN_GC if (!mono_gc_ephemeron_array_add (array)) mono_raise_exception (mono_object_domain (array)->out_of_memory_ex); #endif } MonoObject* ves_icall_System_GC_get_ephemeron_tombstone (void) { return mono_domain_get ()->ephemeron_tombstone; } #define mono_allocator_lock() EnterCriticalSection (&allocator_section) #define mono_allocator_unlock() LeaveCriticalSection (&allocator_section) static CRITICAL_SECTION allocator_section; static CRITICAL_SECTION handle_section; typedef enum { HANDLE_WEAK, HANDLE_WEAK_TRACK, HANDLE_NORMAL, HANDLE_PINNED } HandleType; static HandleType mono_gchandle_get_type (guint32 gchandle); MonoObject * ves_icall_System_GCHandle_GetTarget (guint32 handle) { return mono_gchandle_get_target (handle); } /* * if type == -1, change the target of the handle, otherwise allocate a new handle. */ guint32 ves_icall_System_GCHandle_GetTargetHandle (MonoObject *obj, guint32 handle, gint32 type) { if (type == -1) { mono_gchandle_set_target (handle, obj); /* the handle doesn't change */ return handle; } switch (type) { case HANDLE_WEAK: return mono_gchandle_new_weakref (obj, FALSE); case HANDLE_WEAK_TRACK: return mono_gchandle_new_weakref (obj, TRUE); case HANDLE_NORMAL: return mono_gchandle_new (obj, FALSE); case HANDLE_PINNED: return mono_gchandle_new (obj, TRUE); default: g_assert_not_reached (); } return 0; } void ves_icall_System_GCHandle_FreeHandle (guint32 handle) { mono_gchandle_free (handle); } gpointer ves_icall_System_GCHandle_GetAddrOfPinnedObject (guint32 handle) { MonoObject *obj; if (mono_gchandle_get_type (handle) != HANDLE_PINNED) return (gpointer)-2; obj = mono_gchandle_get_target (handle); if (obj) { MonoClass *klass = mono_object_class (obj); if (klass == mono_defaults.string_class) { return mono_string_chars ((MonoString*)obj); } else if (klass->rank) { return mono_array_addr ((MonoArray*)obj, char, 0); } else { /* the C# code will check and throw the exception */ /* FIXME: missing !klass->blittable test, see bug #61134 */ if ((klass->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_AUTO_LAYOUT) return (gpointer)-1; return (char*)obj + sizeof (MonoObject); } } return NULL; } typedef struct { guint32 *bitmap; gpointer *entries; guint32 size; guint8 type; guint slot_hint : 24; /* starting slot for search */ /* 2^16 appdomains should be enough for everyone (though I know I'll regret this in 20 years) */ /* we alloc this only for weak refs, since we can get the domain directly in the other cases */ guint16 *domain_ids; } HandleData; /* weak and weak-track arrays will be allocated in malloc memory */ static HandleData gc_handles [] = { {NULL, NULL, 0, HANDLE_WEAK, 0}, {NULL, NULL, 0, HANDLE_WEAK_TRACK, 0}, {NULL, NULL, 0, HANDLE_NORMAL, 0}, {NULL, NULL, 0, HANDLE_PINNED, 0} }; #define lock_handles(handles) EnterCriticalSection (&handle_section) #define unlock_handles(handles) LeaveCriticalSection (&handle_section) static int find_first_unset (guint32 bitmap) { int i; for (i = 0; i < 32; ++i) { if (!(bitmap & (1 
<< i))) return i; } return -1; } static guint32 alloc_handle (HandleData *handles, MonoObject *obj, gboolean track) { gint slot, i; guint32 res; lock_handles (handles); if (!handles->size) { handles->size = 32; if (handles->type > HANDLE_WEAK_TRACK) { handles->entries = mono_gc_alloc_fixed (sizeof (gpointer) * handles->size, mono_gc_make_root_descr_all_refs (handles->size)); } else { handles->entries = g_malloc0 (sizeof (gpointer) * handles->size); handles->domain_ids = g_malloc0 (sizeof (guint16) * handles->size); } handles->bitmap = g_malloc0 (handles->size / 8); } i = -1; for (slot = handles->slot_hint; slot < handles->size / 32; ++slot) { if (handles->bitmap [slot] != 0xffffffff) { i = find_first_unset (handles->bitmap [slot]); handles->slot_hint = slot; break; } } if (i == -1 && handles->slot_hint != 0) { for (slot = 0; slot < handles->slot_hint; ++slot) { if (handles->bitmap [slot] != 0xffffffff) { i = find_first_unset (handles->bitmap [slot]); handles->slot_hint = slot; break; } } } if (i == -1) { guint32 *new_bitmap; guint32 new_size = handles->size * 2; /* always double: we memset to 0 based on this below */ /* resize and copy the bitmap */ new_bitmap = g_malloc0 (new_size / 8); memcpy (new_bitmap, handles->bitmap, handles->size / 8); g_free (handles->bitmap); handles->bitmap = new_bitmap; /* resize and copy the entries */ if (handles->type > HANDLE_WEAK_TRACK) { gpointer *entries; entries = mono_gc_alloc_fixed (sizeof (gpointer) * new_size, mono_gc_make_root_descr_all_refs (new_size)); memcpy (entries, handles->entries, sizeof (gpointer) * handles->size); mono_gc_free_fixed (handles->entries); handles->entries = entries; } else { gpointer *entries; guint16 *domain_ids; domain_ids = g_malloc0 (sizeof (guint16) * new_size); entries = g_malloc (sizeof (gpointer) * new_size); /* we disable GC because we could lose some disappearing link updates */ mono_gc_disable (); memcpy (entries, handles->entries, sizeof (gpointer) * handles->size); memset (entries + handles->size, 0, sizeof (gpointer) * handles->size); memcpy (domain_ids, handles->domain_ids, sizeof (guint16) * handles->size); for (i = 0; i < handles->size; ++i) { MonoObject *obj = mono_gc_weak_link_get (&(handles->entries [i])); if (handles->entries [i]) mono_gc_weak_link_remove (&(handles->entries [i])); /*g_print ("reg/unreg entry %d of type %d at %p to object %p (%p), was: %p\n", i, handles->type, &(entries [i]), obj, entries [i], handles->entries [i]);*/ if (obj) { mono_gc_weak_link_add (&(entries [i]), obj, track); } } g_free (handles->entries); g_free (handles->domain_ids); handles->entries = entries; handles->domain_ids = domain_ids; mono_gc_enable (); } /* set i and slot to the next free position */ i = 0; slot = (handles->size + 1) / 32; handles->slot_hint = handles->size + 1; handles->size = new_size; } handles->bitmap [slot] |= 1 << i; slot = slot * 32 + i; handles->entries [slot] = obj; if (handles->type <= HANDLE_WEAK_TRACK) { /*FIXME, what to use when obj == null?*/ handles->domain_ids [slot] = (obj ? 
mono_object_get_domain (obj) : mono_domain_get ())->domain_id; if (obj) mono_gc_weak_link_add (&(handles->entries [slot]), obj, track); } mono_perfcounters->gc_num_handles++; unlock_handles (handles); /*g_print ("allocated entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/ res = (slot << 3) | (handles->type + 1); mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handles->type, res, obj); return res; } /** * mono_gchandle_new: * @obj: managed object to get a handle for * @pinned: whether the object should be pinned * * This returns a handle that wraps the object, this is used to keep a * reference to a managed object from the unmanaged world and preventing the * object from being disposed. * * If @pinned is false the address of the object can not be obtained, if it is * true the address of the object can be obtained. This will also pin the * object so it will not be possible by a moving garbage collector to move the * object. * * Returns: a handle that can be used to access the object from * unmanaged code. */ guint32 mono_gchandle_new (MonoObject *obj, gboolean pinned) { return alloc_handle (&gc_handles [pinned? HANDLE_PINNED: HANDLE_NORMAL], obj, FALSE); } /** * mono_gchandle_new_weakref: * @obj: managed object to get a handle for * @pinned: whether the object should be pinned * * This returns a weak handle that wraps the object, this is used to * keep a reference to a managed object from the unmanaged world. * Unlike the mono_gchandle_new the object can be reclaimed by the * garbage collector. In this case the value of the GCHandle will be * set to zero. * * If @pinned is false the address of the object can not be obtained, if it is * true the address of the object can be obtained. This will also pin the * object so it will not be possible by a moving garbage collector to move the * object. * * Returns: a handle that can be used to access the object from * unmanaged code. */ guint32 mono_gchandle_new_weakref (MonoObject *obj, gboolean track_resurrection) { guint32 handle = alloc_handle (&gc_handles [track_resurrection? HANDLE_WEAK_TRACK: HANDLE_WEAK], obj, track_resurrection); #ifndef HAVE_SGEN_GC if (track_resurrection) mono_gc_add_weak_track_handle (obj, handle); #endif return handle; } static HandleType mono_gchandle_get_type (guint32 gchandle) { guint type = (gchandle & 7) - 1; return type; } /** * mono_gchandle_get_target: * @gchandle: a GCHandle's handle. * * The handle was previously created by calling mono_gchandle_new or * mono_gchandle_new_weakref. * * Returns a pointer to the MonoObject represented by the handle or * NULL for a collected object if using a weakref handle. */ MonoObject* mono_gchandle_get_target (guint32 gchandle) { guint slot = gchandle >> 3; guint type = (gchandle & 7) - 1; HandleData *handles = &gc_handles [type]; MonoObject *obj = NULL; if (type > 3) return NULL; lock_handles (handles); if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) { if (handles->type <= HANDLE_WEAK_TRACK) { obj = mono_gc_weak_link_get (&handles->entries [slot]); } else { obj = handles->entries [slot]; } } else { /* print a warning? 
*/ } unlock_handles (handles); /*g_print ("get target of entry %d of type %d: %p\n", slot, handles->type, obj);*/ return obj; } static void mono_gchandle_set_target (guint32 gchandle, MonoObject *obj) { guint slot = gchandle >> 3; guint type = (gchandle & 7) - 1; HandleData *handles = &gc_handles [type]; MonoObject *old_obj = NULL; if (type > 3) return; lock_handles (handles); if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) { if (handles->type <= HANDLE_WEAK_TRACK) { old_obj = handles->entries [slot]; if (handles->entries [slot]) mono_gc_weak_link_remove (&handles->entries [slot]); if (obj) mono_gc_weak_link_add (&handles->entries [slot], obj, handles->type == HANDLE_WEAK_TRACK); /*FIXME, what to use when obj == null?*/ handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id; } else { handles->entries [slot] = obj; } } else { /* print a warning? */ } /*g_print ("changed entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/ unlock_handles (handles); #ifndef HAVE_SGEN_GC if (type == HANDLE_WEAK_TRACK) mono_gc_change_weak_track_handle (old_obj, obj, gchandle); #endif } /** * mono_gchandle_is_in_domain: * @gchandle: a GCHandle's handle. * @domain: An application domain. * * Returns: true if the object wrapped by the @gchandle belongs to the specific @domain. */ gboolean mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain) { guint slot = gchandle >> 3; guint type = (gchandle & 7) - 1; HandleData *handles = &gc_handles [type]; gboolean result = FALSE; if (type > 3) return FALSE; lock_handles (handles); if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) { if (handles->type <= HANDLE_WEAK_TRACK) { result = domain->domain_id == handles->domain_ids [slot]; } else { MonoObject *obj; obj = handles->entries [slot]; if (obj == NULL) result = TRUE; else result = domain == mono_object_domain (obj); } } else { /* print a warning? */ } unlock_handles (handles); return result; } /** * mono_gchandle_free: * @gchandle: a GCHandle's handle. * * Frees the @gchandle handle. If there are no outstanding * references, the garbage collector can reclaim the memory of the * object wrapped. */ void mono_gchandle_free (guint32 gchandle) { guint slot = gchandle >> 3; guint type = (gchandle & 7) - 1; HandleData *handles = &gc_handles [type]; if (type > 3) return; #ifndef HAVE_SGEN_GC if (type == HANDLE_WEAK_TRACK) mono_gc_remove_weak_track_handle (gchandle); #endif lock_handles (handles); if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) { if (handles->type <= HANDLE_WEAK_TRACK) { if (handles->entries [slot]) mono_gc_weak_link_remove (&handles->entries [slot]); } else { handles->entries [slot] = NULL; } handles->bitmap [slot / 32] &= ~(1 << (slot % 32)); } else { /* print a warning? */ } mono_perfcounters->gc_num_handles--; /*g_print ("freed entry %d of type %d\n", slot, handles->type);*/ unlock_handles (handles); mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handles->type, gchandle, NULL); } /** * mono_gchandle_free_domain: * @domain: domain that is unloading * * Function used internally to cleanup any GC handle for objects belonging * to the specified domain during appdomain unload. 
*/ void mono_gchandle_free_domain (MonoDomain *domain) { guint type; for (type = 0; type < 3; ++type) { guint slot; HandleData *handles = &gc_handles [type]; lock_handles (handles); for (slot = 0; slot < handles->size; ++slot) { if (!(handles->bitmap [slot / 32] & (1 << (slot % 32)))) continue; if (type <= HANDLE_WEAK_TRACK) { if (domain->domain_id == handles->domain_ids [slot]) { handles->bitmap [slot / 32] &= ~(1 << (slot % 32)); if (handles->entries [slot]) mono_gc_weak_link_remove (&handles->entries [slot]); } } else { if (handles->entries [slot] && mono_object_domain (handles->entries [slot]) == domain) { handles->bitmap [slot / 32] &= ~(1 << (slot % 32)); handles->entries [slot] = NULL; } } } unlock_handles (handles); } } MonoBoolean GCHandle_CheckCurrentDomain (guint32 gchandle) { return mono_gchandle_is_in_domain (gchandle, mono_domain_get ()); } #ifndef HAVE_NULL_GC #ifdef MONO_HAS_SEMAPHORES static MonoSemType finalizer_sem; #endif static HANDLE finalizer_event; static volatile gboolean finished=FALSE; void mono_gc_finalize_notify (void) { #ifdef DEBUG g_message ( "%s: prodding finalizer", __func__); #endif #ifdef MONO_HAS_SEMAPHORES MONO_SEM_POST (&finalizer_sem); #else SetEvent (finalizer_event); #endif } #ifdef HAVE_BOEHM_GC static void collect_objects (gpointer key, gpointer value, gpointer user_data) { GPtrArray *arr = (GPtrArray*)user_data; g_ptr_array_add (arr, key); } #endif /* * finalize_domain_objects: * * Run the finalizers of all finalizable objects in req->domain. */ static void finalize_domain_objects (DomainFinalizationReq *req) { MonoDomain *domain = req->domain; #ifdef HAVE_BOEHM_GC while (g_hash_table_size (domain->finalizable_objects_hash) > 0) { int i; GPtrArray *objs; /* * Since the domain is unloading, nobody is allowed to put * new entries into the hash table. But finalize_object might * remove entries from the hash table, so we make a copy. 
*/ objs = g_ptr_array_new (); g_hash_table_foreach (domain->finalizable_objects_hash, collect_objects, objs); /* printf ("FINALIZING %d OBJECTS.\n", objs->len); */ for (i = 0; i < objs->len; ++i) { MonoObject *o = (MonoObject*)g_ptr_array_index (objs, i); /* FIXME: Avoid finalizing threads, etc */ mono_gc_run_finalize (o, 0); } g_ptr_array_free (objs, TRUE); } #elif defined(HAVE_SGEN_GC) #define NUM_FOBJECTS 64 MonoObject *to_finalize [NUM_FOBJECTS]; int count; while ((count = mono_gc_finalizers_for_domain (domain, to_finalize, NUM_FOBJECTS))) { int i; for (i = 0; i < count; ++i) { mono_gc_run_finalize (to_finalize [i], 0); } } #endif /* Process finalizers which are already in the queue */ mono_gc_invoke_finalizers (); /* printf ("DONE.\n"); */ SetEvent (req->done_event); /* The event is closed in mono_domain_finalize if we get here */ g_free (req); } static guint32 finalizer_thread (gpointer unused) { while (!finished) { /* Wait to be notified that there's at least one * finaliser to run */ g_assert (mono_domain_get () == mono_get_root_domain ()); #ifdef MONO_HAS_SEMAPHORES MONO_SEM_WAIT (&finalizer_sem); #else /* Use alertable=FALSE since we will be asked to exit using the event too */ WaitForSingleObjectEx (finalizer_event, INFINITE, FALSE); #endif mono_console_handle_async_ops (); #ifndef DISABLE_ATTACH mono_attach_maybe_start (); #endif reference_queue_proccess_all (); if (domains_to_finalize) { mono_finalizer_lock (); if (domains_to_finalize) { DomainFinalizationReq *req = domains_to_finalize->data; domains_to_finalize = g_slist_remove (domains_to_finalize, req); mono_finalizer_unlock (); finalize_domain_objects (req); } else { mono_finalizer_unlock (); } } /* If finished == TRUE, mono_gc_cleanup has been called (from mono_runtime_cleanup), * before the domain is unloaded. */ mono_gc_invoke_finalizers (); SetEvent (pending_done_event); } SetEvent (shutdown_event); return 0; } void mono_gc_init (void) { InitializeCriticalSection (&handle_section); InitializeCriticalSection (&allocator_section); InitializeCriticalSection (&finalizer_mutex); InitializeCriticalSection (&reference_queue_mutex); MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries); MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries); mono_gc_base_init (); if (mono_gc_is_disabled ()) { gc_disabled = TRUE; return; } finalizer_event = CreateEvent (NULL, FALSE, FALSE, NULL); pending_done_event = CreateEvent (NULL, TRUE, FALSE, NULL); shutdown_event = CreateEvent (NULL, TRUE, FALSE, NULL); if (finalizer_event == NULL || pending_done_event == NULL || shutdown_event == NULL) { g_assert_not_reached (); } #ifdef MONO_HAS_SEMAPHORES MONO_SEM_INIT (&finalizer_sem, 0); #endif gc_thread = mono_thread_create_internal (mono_domain_get (), finalizer_thread, NULL, FALSE); ves_icall_System_Threading_Thread_SetName_internal (gc_thread, mono_string_new (mono_domain_get (), "Finalizer")); } void mono_gc_cleanup (void) { #ifdef DEBUG g_message ("%s: cleaning up finalizer", __func__); #endif if (!gc_disabled) { ResetEvent (shutdown_event); finished = TRUE; if (mono_thread_internal_current () != gc_thread) { mono_gc_finalize_notify (); /* Finishing the finalizer thread, so wait a little bit... 
*/ /* MS seems to wait for about 2 seconds */ if (WaitForSingleObjectEx (shutdown_event, 2000, FALSE) == WAIT_TIMEOUT) { int ret; /* Set a flag which the finalizer thread can check */ suspend_finalizers = TRUE; /* Try to abort the thread, in the hope that it is running managed code */ mono_thread_internal_stop (gc_thread); /* Wait for it to stop */ ret = WaitForSingleObjectEx (gc_thread->handle, 100, TRUE); if (ret == WAIT_TIMEOUT) { /* * The finalizer thread refused to die. There is not much we * can do here, since the runtime is shutting down so the * state the finalizer thread depends on will vanish. */ g_warning ("Shutting down finalizer thread timed out."); } else { /* * FIXME: On unix, when the above wait returns, the thread * might still be running io-layer code, or pthreads code. */ Sleep (100); } } } gc_thread = NULL; #ifdef HAVE_BOEHM_GC GC_finalizer_notifier = NULL; #endif } DeleteCriticalSection (&handle_section); DeleteCriticalSection (&allocator_section); DeleteCriticalSection (&finalizer_mutex); DeleteCriticalSection (&reference_queue_mutex); } #else /* Null GC dummy functions */ void mono_gc_finalize_notify (void) { } void mono_gc_init (void) { InitializeCriticalSection (&handle_section); } void mono_gc_cleanup (void) { } #endif gboolean mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread) { return thread == gc_thread; } /** * mono_gc_is_finalizer_thread: * @thread: the thread to test. * * In Mono objects are finalized asynchronously on a separate thread. * This routine tests whether the @thread argument represents the * finalization thread. * * Returns true if @thread is the finalization thread. */ gboolean mono_gc_is_finalizer_thread (MonoThread *thread) { return mono_gc_is_finalizer_internal_thread (thread->internal_thread); } #if defined(__MACH__) static pthread_t mach_exception_thread; void mono_gc_register_mach_exception_thread (pthread_t thread) { mach_exception_thread = thread; } pthread_t mono_gc_get_mach_exception_thread (void) { return mach_exception_thread; } #endif /** * mono_gc_parse_environment_string_extract_number: * * @str: points to the first digit of the number * @out: pointer to the variable that will receive the value * * Tries to extract a number from the passed string, taking in to account m, k * and g suffixes * * Returns true if passing was successful */ gboolean mono_gc_parse_environment_string_extract_number (const char *str, glong *out) { char *endptr; int len = strlen (str), shift = 0; glong val; gboolean is_suffix = FALSE; char suffix; switch (str [len - 1]) { case 'g': case 'G': shift += 10; case 'm': case 'M': shift += 10; case 'k': case 'K': shift += 10; is_suffix = TRUE; suffix = str [len - 1]; break; } errno = 0; val = strtol (str, &endptr, 10); if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 && val == 0) || (endptr == str)) return FALSE; if (is_suffix) { if (*(endptr + 1)) /* Invalid string. */ return FALSE; val <<= shift; } *out = val; return TRUE; } #ifndef HAVE_SGEN_GC void* mono_gc_alloc_mature (MonoVTable *vtable) { return mono_object_new_specific (vtable); } #endif static MonoReferenceQueue *ref_queues; static void ref_list_remove_element (RefQueueEntry **prev, RefQueueEntry *element) { do { /* Guard if head is changed concurrently. 
*/ while (*prev != element) prev = &(*prev)->next; } while (prev && InterlockedCompareExchangePointer ((void*)prev, element->next, element) != element); } static void ref_list_push (RefQueueEntry **head, RefQueueEntry *value) { RefQueueEntry *current; do { current = *head; value->next = current; } while (InterlockedCompareExchangePointer ((void*)head, value, current) != current); } static void reference_queue_proccess (MonoReferenceQueue *queue) { RefQueueEntry **iter = &queue->queue; RefQueueEntry *entry; while ((entry = *iter)) { if (queue->should_be_deleted || !mono_gc_weak_link_get (&entry->dis_link)) { ref_list_remove_element (iter, entry); mono_gc_weak_link_remove (&entry->dis_link); queue->callback (entry->user_data); g_free (entry); } else { iter = &entry->next; } } } static void reference_queue_proccess_all (void) { MonoReferenceQueue **iter; MonoReferenceQueue *queue = ref_queues; for (; queue; queue = queue->next) reference_queue_proccess (queue); restart: EnterCriticalSection (&reference_queue_mutex); for (iter = &ref_queues; *iter;) { queue = *iter; if (!queue->should_be_deleted) { iter = &queue->next; continue; } if (queue->queue) { LeaveCriticalSection (&reference_queue_mutex); reference_queue_proccess (queue); goto restart; } *iter = queue->next; g_free (queue); } LeaveCriticalSection (&reference_queue_mutex); } /** * mono_gc_reference_queue_new: * @callback callback used when processing dead entries. * * Create a new reference queue used to process collected objects. * A reference queue let you queue the pair (managed object, user data). * Once the managed object is collected @callback will be called * in the finalizer thread with 'user data' as argument. * * The callback is called without any locks held. */ MonoReferenceQueue* mono_gc_reference_queue_new (mono_reference_queue_callback callback) { MonoReferenceQueue *res = g_new0 (MonoReferenceQueue, 1); res->callback = callback; EnterCriticalSection (&reference_queue_mutex); res->next = ref_queues; ref_queues = res; LeaveCriticalSection (&reference_queue_mutex); return res; } /** * mono_gc_reference_queue_add: * @queue the queue to add the reference to. * @obj the object to be watched for collection * @user_data parameter to be passed to the queue callback * * Queue an object to be watched for collection. * * @returns false if the queue is scheduled to be freed. */ gboolean mono_gc_reference_queue_add (MonoReferenceQueue *queue, MonoObject *obj, void *user_data) { RefQueueEntry *head; RefQueueEntry *entry; if (queue->should_be_deleted) return FALSE; entry = g_new0 (RefQueueEntry, 1); entry->user_data = user_data; mono_gc_weak_link_add (&entry->dis_link, obj, TRUE); ref_list_push (&queue->queue, entry); return TRUE; } /** * mono_gc_reference_queue_free: * @queue the queue that should be deleted. * * This operation signals that @queue should be deleted. This operation is deferred * as it happens on the finalizer thread. * * After this call, no further objects can be queued. It's the responsibility of the * caller to make sure that no further attempt to access queue will be made. */ void mono_gc_reference_queue_free (MonoReferenceQueue *queue) { queue->should_be_deleted = TRUE; }
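/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * It spells out the bit layout of the guint32 GC handle built by
 * alloc_handle () above and taken apart by mono_gchandle_get_target () and
 * friends. The helper names are hypothetical; the block is disabled so it
 * does not affect compilation.
 */
#if 0
static guint32
example_gchandle_encode (guint slot, HandleType type)
{
	/* Low 3 bits: handle type biased by one, so 0 is never a valid handle.
	 * Remaining bits: index into gc_handles [type].entries. */
	return (slot << 3) | ((guint32) type + 1);
}

static void
example_gchandle_decode (guint32 gchandle, guint *slot, guint *type)
{
	*slot = gchandle >> 3;
	*type = (gchandle & 7) - 1;	/* HANDLE_WEAK .. HANDLE_PINNED */
}
#endif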
./CrossVul/dataset_final_sorted/CWE-399/c/good_3433_1
crossvul-cpp_data_bad_1845_0
/* * * Copyright (C) 2011 Novell Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/splice.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/namei.h> #include "overlayfs.h" #define OVL_COPY_UP_CHUNK_SIZE (1 << 20) int ovl_copy_xattr(struct dentry *old, struct dentry *new) { ssize_t list_size, size; char *buf, *name, *value; int error; if (!old->d_inode->i_op->getxattr || !new->d_inode->i_op->getxattr) return 0; list_size = vfs_listxattr(old, NULL, 0); if (list_size <= 0) { if (list_size == -EOPNOTSUPP) return 0; return list_size; } buf = kzalloc(list_size, GFP_KERNEL); if (!buf) return -ENOMEM; error = -ENOMEM; value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL); if (!value) goto out; list_size = vfs_listxattr(old, buf, list_size); if (list_size <= 0) { error = list_size; goto out_free_value; } for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX); if (size <= 0) { error = size; goto out_free_value; } error = vfs_setxattr(new, name, value, size, 0); if (error) goto out_free_value; } out_free_value: kfree(value); out: kfree(buf); return error; } static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) { struct file *old_file; struct file *new_file; loff_t old_pos = 0; loff_t new_pos = 0; int error = 0; if (len == 0) return 0; old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY); if (IS_ERR(old_file)) return PTR_ERR(old_file); new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY); if (IS_ERR(new_file)) { error = PTR_ERR(new_file); goto out_fput; } /* FIXME: copy up sparse files efficiently */ while (len) { size_t this_len = OVL_COPY_UP_CHUNK_SIZE; long bytes; if (len < this_len) this_len = len; if (signal_pending_state(TASK_KILLABLE, current)) { error = -EINTR; break; } bytes = do_splice_direct(old_file, &old_pos, new_file, &new_pos, this_len, SPLICE_F_MOVE); if (bytes <= 0) { error = bytes; break; } WARN_ON(old_pos != new_pos); len -= bytes; } fput(new_file); out_fput: fput(old_file); return error; } static char *ovl_read_symlink(struct dentry *realdentry) { int res; char *buf; struct inode *inode = realdentry->d_inode; mm_segment_t old_fs; res = -EINVAL; if (!inode->i_op->readlink) goto err; res = -ENOMEM; buf = (char *) __get_free_page(GFP_KERNEL); if (!buf) goto err; old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ res = inode->i_op->readlink(realdentry, (char __user *)buf, PAGE_SIZE - 1); set_fs(old_fs); if (res < 0) { free_page((unsigned long) buf); goto err; } buf[res] = '\0'; return buf; err: return ERR_PTR(res); } static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat) { struct iattr attr = { .ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, .ia_atime = stat->atime, .ia_mtime = stat->mtime, }; return notify_change(upperdentry, &attr, NULL); } int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) { int err = 0; if (!S_ISLNK(stat->mode)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = stat->mode, }; err = notify_change(upperdentry, &attr, NULL); } if (!err) { struct iattr attr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_uid = stat->uid, .ia_gid = stat->gid, }; err = notify_change(upperdentry, 
&attr, NULL); } if (!err) ovl_set_timestamps(upperdentry, stat); return err; } static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, struct dentry *dentry, struct path *lowerpath, struct kstat *stat, struct iattr *attr, const char *link) { struct inode *wdir = workdir->d_inode; struct inode *udir = upperdir->d_inode; struct dentry *newdentry = NULL; struct dentry *upper = NULL; umode_t mode = stat->mode; int err; newdentry = ovl_lookup_temp(workdir, dentry); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out; upper = lookup_one_len(dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out1; /* Can't properly set mode on creation because of the umask */ stat->mode &= S_IFMT; err = ovl_create_real(wdir, newdentry, stat, link, NULL, true); stat->mode = mode; if (err) goto out2; if (S_ISREG(stat->mode)) { struct path upperpath; ovl_path_upper(dentry, &upperpath); BUG_ON(upperpath.dentry != NULL); upperpath.dentry = newdentry; err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); if (err) goto out_cleanup; } err = ovl_copy_xattr(lowerpath->dentry, newdentry); if (err) goto out_cleanup; mutex_lock(&newdentry->d_inode->i_mutex); err = ovl_set_attr(newdentry, stat); if (!err && attr) err = notify_change(newdentry, attr, NULL); mutex_unlock(&newdentry->d_inode->i_mutex); if (err) goto out_cleanup; err = ovl_do_rename(wdir, newdentry, udir, upper, 0); if (err) goto out_cleanup; ovl_dentry_update(dentry, newdentry); newdentry = NULL; /* * Non-directories become opaque when copied up. */ if (!S_ISDIR(stat->mode)) ovl_dentry_set_opaque(dentry, true); out2: dput(upper); out1: dput(newdentry); out: return err; out_cleanup: ovl_cleanup(wdir, newdentry); goto out; } /* * Copy up a single dentry * * Directory renames are only allowed on "pure upper" (already created on * upper filesystem, never copied up). Directories which are on lower or * are merged may not be renamed. For these -EXDEV is returned and * userspace has to deal with it. This means, when copying up a * directory we can rely on it and ancestors being stable. * * Non-directory renames start with copy up of source if necessary. The * actual rename will only proceed once the copy up was successful. Copy * up uses upper parent i_mutex for exclusion. Since rename can change * d_parent it is possible that the copy up will lock the old parent. At * that point the file will have already been copied up anyway. 
*/ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, struct path *lowerpath, struct kstat *stat, struct iattr *attr) { struct dentry *workdir = ovl_workdir(dentry); int err; struct kstat pstat; struct path parentpath; struct dentry *upperdir; struct dentry *upperdentry; const struct cred *old_cred; struct cred *override_cred; char *link = NULL; if (WARN_ON(!workdir)) return -EROFS; ovl_path_upper(parent, &parentpath); upperdir = parentpath.dentry; err = vfs_getattr(&parentpath, &pstat); if (err) return err; if (S_ISLNK(stat->mode)) { link = ovl_read_symlink(lowerpath->dentry); if (IS_ERR(link)) return PTR_ERR(link); } err = -ENOMEM; override_cred = prepare_creds(); if (!override_cred) goto out_free_link; override_cred->fsuid = stat->uid; override_cred->fsgid = stat->gid; /* * CAP_SYS_ADMIN for copying up extended attributes * CAP_DAC_OVERRIDE for create * CAP_FOWNER for chmod, timestamp update * CAP_FSETID for chmod * CAP_CHOWN for chown * CAP_MKNOD for mknod */ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); cap_raise(override_cred->cap_effective, CAP_FOWNER); cap_raise(override_cred->cap_effective, CAP_FSETID); cap_raise(override_cred->cap_effective, CAP_CHOWN); cap_raise(override_cred->cap_effective, CAP_MKNOD); old_cred = override_creds(override_cred); err = -EIO; if (lock_rename(workdir, upperdir) != NULL) { pr_err("overlayfs: failed to lock workdir+upperdir\n"); goto out_unlock; } upperdentry = ovl_dentry_upper(dentry); if (upperdentry) { unlock_rename(workdir, upperdir); err = 0; /* Raced with another copy-up? Do the setattr here */ if (attr) { mutex_lock(&upperdentry->d_inode->i_mutex); err = notify_change(upperdentry, attr, NULL); mutex_unlock(&upperdentry->d_inode->i_mutex); } goto out_put_cred; } err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, stat, attr, link); if (!err) { /* Restore timestamps on parent (best effort) */ ovl_set_timestamps(upperdir, &pstat); } out_unlock: unlock_rename(workdir, upperdir); out_put_cred: revert_creds(old_cred); put_cred(override_cred); out_free_link: if (link) free_page((unsigned long) link); return err; } int ovl_copy_up(struct dentry *dentry) { int err; err = 0; while (!err) { struct dentry *next; struct dentry *parent; struct path lowerpath; struct kstat stat; enum ovl_path_type type = ovl_path_type(dentry); if (OVL_TYPE_UPPER(type)) break; next = dget(dentry); /* find the topmost dentry not yet copied up */ for (;;) { parent = dget_parent(next); type = ovl_path_type(parent); if (OVL_TYPE_UPPER(type)) break; dput(next); next = parent; } ovl_path_lower(next, &lowerpath); err = vfs_getattr(&lowerpath, &stat); if (!err) err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); dput(parent); dput(next); } return err; }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_1845_0
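The copy-up path above moves file data in OVL_COPY_UP_CHUNK_SIZE (1 MiB) slices with do_splice_direct(), bailing out between slices when a fatal signal is pending. Below is a minimal userspace sketch of the same chunked-copy-with-cancellation pattern; it uses plain read()/write() rather than the kernel splice machinery, and the file names, chunk size, and helper names are illustrative only, not part of overlayfs.

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define CHUNK_SIZE (1 << 20)	/* mirrors OVL_COPY_UP_CHUNK_SIZE; illustrative */

static volatile sig_atomic_t interrupted;

static void on_sigint(int sig) { (void)sig; interrupted = 1; }

/* Copy src to dst in 1 MiB chunks, stopping early on SIGINT --
 * a rough userspace analogue of the loop in ovl_copy_up_data().
 * Short writes are treated as errors to keep the sketch small. */
static int copy_in_chunks(const char *src, const char *dst)
{
	char *buf = malloc(CHUNK_SIZE);
	int in, out, err = 0;

	if (!buf)
		return -ENOMEM;
	in = open(src, O_RDONLY);
	out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (in < 0 || out < 0) {
		err = -errno;
		goto out_free;
	}
	for (;;) {
		ssize_t n;

		if (interrupted) {	/* like signal_pending_state(TASK_KILLABLE, ...) */
			err = -EINTR;
			break;
		}
		n = read(in, buf, CHUNK_SIZE);
		if (n <= 0) {
			err = n < 0 ? -errno : 0;
			break;
		}
		if (write(out, buf, n) != n) {
			err = -EIO;
			break;
		}
	}
out_free:
	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);
	free(buf);
	return err;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return 1;
	}
	signal(SIGINT, on_sigint);
	return copy_in_chunks(argv[1], argv[2]) ? 1 : 0;
}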
crossvul-cpp_data_good_3520_0
/* * Handle firewalling * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * Bart De Schuymer <bdschuym@pandora.be> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Lennert dedicates this file to Kerstin Wurdinger. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/if_pppox.h> #include <linux/ppp_defs.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter_arp.h> #include <linux/in_route.h> #include <linux/inetdevice.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/route.h> #include <asm/uaccess.h> #include "br_private.h" #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #define skb_origaddr(skb) (((struct bridge_skb_cb *) \ (skb->nf_bridge->data))->daddr.ipv4) #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr) #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr) #ifdef CONFIG_SYSCTL static struct ctl_table_header *brnf_sysctl_header; static int brnf_call_iptables __read_mostly = 1; static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 #endif static inline __be16 vlan_proto(const struct sk_buff *skb) { if (vlan_tx_tag_present(skb)) return skb->protocol; else if (skb->protocol == htons(ETH_P_8021Q)) return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; else return 0; } #define IS_VLAN_IP(skb) \ (vlan_proto(skb) == htons(ETH_P_IP) && \ brnf_filter_vlan_tagged) #define IS_VLAN_IPV6(skb) \ (vlan_proto(skb) == htons(ETH_P_IPV6) && \ brnf_filter_vlan_tagged) #define IS_VLAN_ARP(skb) \ (vlan_proto(skb) == htons(ETH_P_ARP) && \ brnf_filter_vlan_tagged) static inline __be16 pppoe_proto(const struct sk_buff *skb) { return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + sizeof(struct pppoe_hdr))); } #define IS_PPPOE_IP(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IP) && \ brnf_filter_pppoe_tagged) #define IS_PPPOE_IPV6(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IPV6) && \ brnf_filter_pppoe_tagged) static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) { } static struct dst_ops fake_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, }; /* * Initialize bogus route table used to keep netfilter happy. * Currently, we fill in the PMTU entry because netfilter * refragmentation needs it, and the rt_flags entry because * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
*/ void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; atomic_set(&rt->dst.__refcnt, 1); rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_metric_set(&rt->dst, RTAX_MTU, 1500); rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? &port->br->fake_rtable : NULL; } static inline struct net_device *bridge_parent(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? port->br->dev : NULL; } static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) { skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); if (likely(skb->nf_bridge)) atomic_set(&(skb->nf_bridge->use), 1); return skb->nf_bridge; } static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (atomic_read(&nf_bridge->use) > 1) { struct nf_bridge_info *tmp = nf_bridge_alloc(skb); if (tmp) { memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info)); atomic_set(&tmp->use, 1); } nf_bridge_put(nf_bridge); nf_bridge = tmp; } return nf_bridge; } static inline void nf_bridge_push_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_push(skb, len); skb->network_header -= len; } static inline void nf_bridge_pull_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull(skb, len); skb->network_header += len; } static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull_rcsum(skb, len); skb->network_header += len; } static inline void nf_bridge_save_header(struct sk_buff *skb) { int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); skb_copy_from_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); } static inline void nf_bridge_update_protocol(struct sk_buff *skb) { if (skb->nf_bridge->mask & BRNF_8021Q) skb->protocol = htons(ETH_P_8021Q); else if (skb->nf_bridge->mask & BRNF_PPPoE) skb->protocol = htons(ETH_P_PPP_SES); } /* When handing a packet over to the IP layer * check whether we have a skb that is in the * expected format */ static int br_parse_ip_options(struct sk_buff *skb) { struct ip_options *opt; struct iphdr *iph; struct net_device *dev = skb->dev; u32 len; iph = ip_hdr(skb); opt = &(IPCB(skb)->opt); /* Basic sanity checks */ if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto inhdr_error; len = ntohs(iph->tot_len); if (skb->len < len) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; if (pskb_trim_rcsum(skb, len)) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (iph->ihl == 5) return 0; opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) goto inhdr_error; /* Check correct handling of SRR option */ if (unlikely(opt->srr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) goto drop; if (ip_options_rcv_srr(skb)) goto drop; } return 0; inhdr_error: IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS); drop: return -1; } /* Fill in the header 
for fragmented IP packets handled by * the IPv4 connection tracking code. */ int nf_bridge_copy_header(struct sk_buff *skb) { int err; unsigned int header_size; nf_bridge_update_protocol(skb); header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); err = skb_cow_head(skb, header_size); if (err) return err; skb_copy_to_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); __skb_push(skb, nf_bridge_encap_header_len(skb)); return 0; } /* PF_BRIDGE/PRE_ROUTING *********************************************/ /* Undo the changes made for ip6tables PREROUTING and continue the * bridge PRE_ROUTING hook. */ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Obtain the correct destination MAC address, while preserving the original * source MAC address. If we already know this address, we just copy it. If we * don't, we use the neighbour framework to find out. In both cases, we make * sure that br_handle_frame_finish() is called afterwards. */ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct dst_entry *dst; skb->dev = bridge_parent(skb->dev); if (!skb->dev) goto free_skb; dst = skb_dst(skb); if (dst->hh) { neigh_hh_bridge(dst->hh, skb); skb->dev = nf_bridge->physindev; return br_handle_frame_finish(skb); } else if (dst->neighbour) { /* the neighbour function below overwrites the complete * MAC header, so we save the Ethernet source address and * protocol number. */ skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); /* tell br_dev_xmit to continue with forwarding */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; return dst->neighbour->output(skb); } free_skb: kfree_skb(skb); return 0; } /* This requires some explaining. If DNAT has taken place, * we will need to fix up the destination Ethernet address. * * There are two cases to consider: * 1. The packet was DNAT'ed to a device in the same bridge * port group as it was received on. We can still bridge * the packet. * 2. The packet was DNAT'ed to a different device, either * a non-bridged device or another bridge port group. * The packet will need to be routed. * * The correct way of distinguishing between these two cases is to * call ip_route_input() and to look at skb->dst->dev, which is * changed to the destination device if ip_route_input() succeeds. * * Let's first consider the case that ip_route_input() succeeds: * * If the output device equals the logical bridge device the packet * came in on, we can consider this bridging. The corresponding MAC * address will be obtained in br_nf_pre_routing_finish_bridge. * Otherwise, the packet is considered to be routed and we just * change the destination MAC address so that the packet will * later be passed up to the IP stack to be routed. For a redirected * packet, ip_route_input() will give back the localhost as output device, * which differs from the bridge device. 
* * Let's now consider the case that ip_route_input() fails: * * This can be because the destination address is martian, in which case * the packet will be dropped. * If IP forwarding is disabled, ip_route_input() will fail, while * ip_route_output_key() can return success. The source * address for ip_route_output_key() is set to zero, so ip_route_output_key() * thinks we're handling a locally generated packet and won't care * if IP forwarding is enabled. If the output device equals the logical bridge * device, we proceed as if ip_route_input() succeeded. If it differs from the * logical bridge port or if ip_route_output_key() fails we drop the packet. */ static int br_nf_pre_routing_finish(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; int err; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; if (dnat_took_place(skb)) { if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a * martian destination or due to the fact that * forwarding is disabled. For most martian packets, * ip_route_output_key() will fail. It won't fail for 2 types of * martian destinations: loopback destinations and destination * 0.0.0.0. In both cases the packet will be dropped because the * destination is the loopback device and not the bridge. */ if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) goto free_skb; rt = ip_route_output(dev_net(dev), iph->daddr, 0, RT_TOS(iph->tos), 0); if (!IS_ERR(rt)) { /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. */ if (rt->dst.dev == dev) { skb_dst_set(skb, &rt->dst); goto bridged_dnat; } ip_rt_put(rt); } free_skb: kfree_skb(skb); return 0; } else { if (skb_dst(skb)->dev == dev) { bridged_dnat: skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_bridge, 1); return 0; } memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN); skb->pkt_type = PACKET_HOST; } } else { rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); } skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; skb->dev = bridge_parent(skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) nf_bridge->mask |= BRNF_PPPoE; return skb->dev; } /* We only check the length. 
A bridge shouldn't do any hop-by-hop stuff anyway */ static int check_hbh_len(struct sk_buff *skb) { unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); u32 pkt_len; const unsigned char *nh = skb_network_header(skb); int off = raw - nh; int len = (raw[1] + 1) << 3; if ((raw + len) - skb->data > skb_headlen(skb)) goto bad; off += 2; len -= 2; while (len > 0) { int optlen = nh[off + 1] + 2; switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; case IPV6_TLV_PADN: break; case IPV6_TLV_JUMBO: if (nh[off + 1] != 4 || (off & 3) != 2) goto bad; pkt_len = ntohl(*(__be32 *) (nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || ipv6_hdr(skb)->payload_len) goto bad; if (pkt_len > skb->len - sizeof(struct ipv6hdr)) goto bad; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto bad; nh = skb_network_header(skb); break; default: if (optlen > len) goto bad; break; } off += optlen; len -= optlen; } if (len == 0) return 0; bad: return -1; } /* Replicate the checks that IPv6 does on packet reception and pass the packet * to ip6tables, which doesn't support NAT, so things are fairly simple. */ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct ipv6hdr *hdr; u32 pkt_len; if (skb->len < sizeof(struct ipv6hdr)) return NF_DROP; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return NF_DROP; hdr = ipv6_hdr(skb); if (hdr->version != 6) return NF_DROP; pkt_len = ntohs(hdr->payload_len); if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) return NF_DROP; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) return NF_DROP; } if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; skb->protocol = htons(ETH_P_IPV6); NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_ipv6); return NF_STOLEN; } /* Direct IPv6 traffic to br_nf_pre_routing_ipv6. * Replicate the checks that IPv4 does on packet reception. * Set skb->dev to the bridge device (i.e. parent of the * receiving device) to make netfilter happy, the REDIRECT * target in particular. Save the original destination IP * address to be able to detect DNAT afterwards. 
*/ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; __u32 len = nf_bridge_encap_header_len(skb); if (unlikely(!pskb_may_pull(skb, len))) return NF_DROP; p = br_port_get_rcu(in); if (p == NULL) return NF_DROP; br = p->br; if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { if (!brnf_call_ip6tables && !br->nf_call_ip6tables) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } if (!brnf_call_iptables && !br->nf_call_iptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); if (br_parse_ip_options(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; store_orig_dstaddr(skb); skb->protocol = htons(ETH_P_IP); NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish); return NF_STOLEN; } /* PF_BRIDGE/LOCAL_IN ************************************************/ /* The packet is locally destined, which requires a real * dst_entry, so detach the fake one. On the way up, the * packet would pass through PRE_ROUTING again (which already * took place when the packet entered the bridge), but we * register an IPv4 PRE_ROUTING 'sabotage' hook that will * prevent this from happening. */ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct rtable *rt = skb_rtable(skb); if (rt && rt == bridge_parent_rtable(in)) skb_dst_drop(skb); return NF_ACCEPT; } /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *in; if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) { in = nf_bridge->physindev; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge_update_protocol(skb); } else { in = *((struct net_device **)(skb->cb)); } nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); return 0; } /* This is the 'purely bridged' case. For IP, we pass the packet to * netfilter with indev and outdev set to the bridge device, * but we are still able to filter on the 'real' indev/outdev * because of the physdev module. For ARP, indev and outdev are the * bridge ports. */ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge; struct net_device *parent; u_int8_t pf; if (!skb->nf_bridge) return NF_ACCEPT; /* Need exclusive nf_bridge_info since we might have multiple * different physoutdevs. 
*/ if (!nf_bridge_unshare(skb)) return NF_DROP; parent = bridge_parent(out); if (!parent) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; nf_bridge_pull_encap_header(skb); nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } if (br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; nf_bridge->physoutdev = skb->dev; if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, br_nf_forward_finish); return NF_STOLEN; } static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; struct net_device **d = (struct net_device **)(skb->cb); p = br_port_get_rcu(out); if (p == NULL) return NF_ACCEPT; br = p->br; if (!brnf_call_arptables && !br->nf_call_arptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_ARP)) { if (!IS_VLAN_ARP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header(skb); } if (arp_hdr(skb)->ar_pln != 4) { if (IS_VLAN_ARP(skb)) nf_bridge_push_encap_header(skb); return NF_ACCEPT; } *d = (struct net_device *)in; NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in, (struct net_device *)out, br_nf_forward_finish); return NF_STOLEN; } #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { if (br_parse_ip_options(skb)) /* Drop invalid packet */ return NF_DROP; ret = ip_fragment(skb, br_dev_queue_push_xmit); } else ret = br_dev_queue_push_xmit(skb); return ret; } #else static int br_nf_dev_queue_xmit(struct sk_buff *skb) { return br_dev_queue_push_xmit(skb); } #endif /* PF_BRIDGE/POST_ROUTING ********************************************/ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *realoutdev = bridge_parent(skb->dev); u_int8_t pf; if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED)) return NF_ACCEPT; if (!realoutdev) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; /* We assume any code from br_dev_queue_push_xmit onwards doesn't care * about the value of skb->pkt_type. */ if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge_pull_encap_header(skb); nf_bridge_save_header(skb); if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, br_nf_dev_queue_xmit); return NF_STOLEN; } /* IP/SABOTAGE *****************************************************/ /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING * for the second time. 
*/ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { if (skb->nf_bridge && !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { return NF_STOP; } return NF_ACCEPT; } /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because * br_dev_queue_push_xmit is called afterwards */ static struct nf_hook_ops br_nf_ops[] __read_mostly = { { .hook = br_nf_pre_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_local_in, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_forward_ip, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF - 1, }, { .hook = br_nf_forward_arp, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_post_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_LAST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_FIRST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_FIRST, }, }; #ifdef CONFIG_SYSCTL static int brnf_sysctl_call_tables(ctl_table * ctl, int write, void __user * buffer, size_t * lenp, loff_t * ppos) { int ret; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (write && *(int *)(ctl->data)) *(int *)(ctl->data) = 1; return ret; } static ctl_table brnf_table[] = { { .procname = "bridge-nf-call-arptables", .data = &brnf_call_arptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-call-iptables", .data = &brnf_call_iptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-call-ip6tables", .data = &brnf_call_ip6tables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-vlan-tagged", .data = &brnf_filter_vlan_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-pppoe-tagged", .data = &brnf_filter_pppoe_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { } }; static struct ctl_path brnf_path[] = { { .procname = "net", }, { .procname = "bridge", }, { } }; #endif int __init br_netfilter_init(void) { int ret; ret = dst_entries_init(&fake_dst_ops); if (ret < 0) return ret; ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); if (ret < 0) { dst_entries_destroy(&fake_dst_ops); return ret; } #ifdef CONFIG_SYSCTL brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); dst_entries_destroy(&fake_dst_ops); return -ENOMEM; } #endif printk(KERN_NOTICE "Bridge firewalling registered\n"); return 0; } void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL unregister_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3520_0
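The bridge-netfilter code above exposes its call-iptables/ip6tables/arptables and VLAN/PPPoE filtering switches under /proc/sys/net/bridge/, and brnf_sysctl_call_tables() normalises any non-zero value written there to 1. The short sketch below flips one of those knobs from userspace; the helper name is illustrative, but the proc paths follow directly from the brnf_path and brnf_table definitions in the file.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write "0" or "1" to one of the bridge-netfilter sysctls, e.g.
 * "bridge-nf-call-iptables". Any non-zero value written is stored
 * back as 1 by the kernel's brnf_sysctl_call_tables() handler. */
static int brnf_set_knob(const char *knob, int enable)
{
	char path[128];
	char val = enable ? '1' : '0';
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/proc/sys/net/bridge/%s", knob);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);	/* e.g. bridge module not loaded */
		return -1;
	}
	if (write(fd, &val, 1) != 1)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	/* Disable iptables traversal for bridged IPv4 frames. */
	return brnf_set_knob("bridge-nf-call-iptables", 0) ? 1 : 0;
}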
crossvul-cpp_data_good_3431_3
/* * reflection.c: Routines for creating an image at runtime. * * Author: * Paolo Molaro (lupus@ximian.com) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * */ #include <config.h> #include "mono/utils/mono-digest.h" #include "mono/utils/mono-membar.h" #include "mono/metadata/reflection.h" #include "mono/metadata/tabledefs.h" #include "mono/metadata/metadata-internals.h" #include <mono/metadata/profiler-private.h> #include "mono/metadata/class-internals.h" #include "mono/metadata/gc-internal.h" #include "mono/metadata/tokentype.h" #include "mono/metadata/domain-internals.h" #include "mono/metadata/opcodes.h" #include "mono/metadata/assembly.h" #include "mono/metadata/object-internals.h" #include <mono/metadata/exception.h> #include <mono/metadata/marshal.h> #include <mono/metadata/security-manager.h> #include <stdio.h> #include <glib.h> #include <errno.h> #include <time.h> #include <string.h> #include <ctype.h> #include "image.h" #include "cil-coff.h" #include "mono-endian.h" #include <mono/metadata/gc-internal.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/security-core-clr.h> #include <mono/metadata/debug-helpers.h> #include <mono/utils/mono-string.h> #include <mono/utils/mono-error-internals.h> #if HAVE_SGEN_GC static void* reflection_info_desc = NULL; #define MOVING_GC_REGISTER(addr) do { \ if (!reflection_info_desc) { \ gsize bmap = 1; \ reflection_info_desc = mono_gc_make_descr_from_bitmap (&bmap, 1); \ } \ mono_gc_register_root ((char*)(addr), sizeof (gpointer), reflection_info_desc); \ } while (0) #else #define MOVING_GC_REGISTER(addr) #endif typedef struct { char *p; char *buf; char *end; } SigBuffer; #define TEXT_OFFSET 512 #define CLI_H_SIZE 136 #define FILE_ALIGN 512 #define VIRT_ALIGN 8192 #define START_TEXT_RVA 0x00002000 typedef struct { MonoReflectionILGen *ilgen; MonoReflectionType *rtype; MonoArray *parameters; MonoArray *generic_params; MonoGenericContainer *generic_container; MonoArray *pinfo; MonoArray *opt_types; guint32 attrs; guint32 iattrs; guint32 call_conv; guint32 *table_idx; /* note: it's a pointer */ MonoArray *code; MonoObject *type; MonoString *name; MonoBoolean init_locals; MonoBoolean skip_visibility; MonoArray *return_modreq; MonoArray *return_modopt; MonoArray *param_modreq; MonoArray *param_modopt; MonoArray *permissions; MonoMethod *mhandle; guint32 nrefs; gpointer *refs; /* for PInvoke */ int charset, extra_flags, native_cc; MonoString *dll, *dllentry; } ReflectionMethodBuilder; typedef struct { guint32 owner; MonoReflectionGenericParam *gparam; } GenericParamTableEntry; const unsigned char table_sizes [MONO_TABLE_NUM] = { MONO_MODULE_SIZE, MONO_TYPEREF_SIZE, MONO_TYPEDEF_SIZE, 0, MONO_FIELD_SIZE, 0, MONO_METHOD_SIZE, 0, MONO_PARAM_SIZE, MONO_INTERFACEIMPL_SIZE, MONO_MEMBERREF_SIZE, /* 0x0A */ MONO_CONSTANT_SIZE, MONO_CUSTOM_ATTR_SIZE, MONO_FIELD_MARSHAL_SIZE, MONO_DECL_SECURITY_SIZE, MONO_CLASS_LAYOUT_SIZE, MONO_FIELD_LAYOUT_SIZE, /* 0x10 */ MONO_STAND_ALONE_SIGNATURE_SIZE, MONO_EVENT_MAP_SIZE, 0, MONO_EVENT_SIZE, MONO_PROPERTY_MAP_SIZE, 0, MONO_PROPERTY_SIZE, MONO_METHOD_SEMA_SIZE, MONO_METHODIMPL_SIZE, MONO_MODULEREF_SIZE, /* 0x1A */ MONO_TYPESPEC_SIZE, MONO_IMPLMAP_SIZE, MONO_FIELD_RVA_SIZE, 0, 0, MONO_ASSEMBLY_SIZE, /* 0x20 */ MONO_ASSEMBLY_PROCESSOR_SIZE, MONO_ASSEMBLYOS_SIZE, MONO_ASSEMBLYREF_SIZE, MONO_ASSEMBLYREFPROC_SIZE, MONO_ASSEMBLYREFOS_SIZE, MONO_FILE_SIZE, MONO_EXP_TYPE_SIZE, MONO_MANIFEST_SIZE, MONO_NESTED_CLASS_SIZE, MONO_GENERICPARAM_SIZE, 
/* 0x2A */ MONO_METHODSPEC_SIZE, MONO_GENPARCONSTRAINT_SIZE }; #ifndef DISABLE_REFLECTION_EMIT static guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec); static guint32 mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec); static guint32 mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *cb); static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper); static void ensure_runtime_vtable (MonoClass *klass); static gpointer resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context); static guint32 mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method); static guint32 encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context); static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly); static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb); static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb); #endif static guint32 mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type); static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec); static void mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly); static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo); static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type); static char* type_get_qualified_name (MonoType *type, MonoAssembly *ass); static void encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf); static void get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types); static MonoObject *mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob); static MonoReflectionType *mono_reflection_type_get_underlying_system_type (MonoReflectionType* t); static MonoType* mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve); static MonoReflectionType* mono_reflection_type_resolve_user_types (MonoReflectionType *type); static gboolean is_sre_array (MonoClass *class); static gboolean is_sre_byref (MonoClass *class); static gboolean is_sre_pointer (MonoClass *class); static gboolean is_sre_method_builder (MonoClass *class); static gboolean is_sre_ctor_builder (MonoClass *class); static gboolean is_sr_mono_method (MonoClass *class); static gboolean is_sr_mono_cmethod (MonoClass *class); static gboolean is_sr_mono_generic_method (MonoClass *class); static gboolean is_sr_mono_generic_cmethod (MonoClass *class); static gboolean is_sr_mono_field (MonoClass *class); static gboolean is_sr_mono_property (MonoClass *class); static gboolean is_sre_method_on_tb_inst (MonoClass *class); static gboolean is_sre_ctor_on_tb_inst (MonoClass *class); static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method); static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m); static MonoMethod * inflate_method 
(MonoReflectionGenericClass *type, MonoObject *obj); #define RESOLVE_TYPE(type) do { type = (void*)mono_reflection_type_resolve_user_types ((MonoReflectionType*)type); } while (0) #define RESOLVE_ARRAY_TYPE_ELEMENT(array, index) do { \ MonoReflectionType *__type = mono_array_get (array, MonoReflectionType*, index); \ __type = mono_reflection_type_resolve_user_types (__type); \ mono_array_set (arr, MonoReflectionType*, index, __type); \ } while (0) #define mono_type_array_get_and_resolve(array, index) mono_reflection_type_get_handle ((MonoReflectionType*)mono_array_get (array, gpointer, index)) void mono_reflection_init (void) { } static void sigbuffer_init (SigBuffer *buf, int size) { buf->buf = g_malloc (size); buf->p = buf->buf; buf->end = buf->buf + size; } static void sigbuffer_make_room (SigBuffer *buf, int size) { if (buf->end - buf->p < size) { int new_size = buf->end - buf->buf + size + 32; char *p = g_realloc (buf->buf, new_size); size = buf->p - buf->buf; buf->buf = p; buf->p = p + size; buf->end = buf->buf + new_size; } } static void sigbuffer_add_value (SigBuffer *buf, guint32 val) { sigbuffer_make_room (buf, 6); mono_metadata_encode_value (val, buf->p, &buf->p); } static void sigbuffer_add_byte (SigBuffer *buf, guint8 val) { sigbuffer_make_room (buf, 1); buf->p [0] = val; buf->p++; } static void sigbuffer_add_mem (SigBuffer *buf, char *p, guint32 size) { sigbuffer_make_room (buf, size); memcpy (buf->p, p, size); buf->p += size; } static void sigbuffer_free (SigBuffer *buf) { g_free (buf->buf); } #ifndef DISABLE_REFLECTION_EMIT /** * mp_g_alloc: * * Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory * from the C heap. */ static gpointer image_g_malloc (MonoImage *image, guint size) { if (image) return mono_image_alloc (image, size); else return g_malloc (size); } #endif /* !DISABLE_REFLECTION_EMIT */ /** * image_g_alloc0: * * Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory * from the C heap. 
*/ static gpointer image_g_malloc0 (MonoImage *image, guint size) { if (image) return mono_image_alloc0 (image, size); else return g_malloc0 (size); } #ifndef DISABLE_REFLECTION_EMIT static char* image_strdup (MonoImage *image, const char *s) { if (image) return mono_image_strdup (image, s); else return g_strdup (s); } #endif #define image_g_new(image,struct_type, n_structs) \ ((struct_type *) image_g_malloc (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs)))) #define image_g_new0(image,struct_type, n_structs) \ ((struct_type *) image_g_malloc0 (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs)))) static void alloc_table (MonoDynamicTable *table, guint nrows) { table->rows = nrows; g_assert (table->columns); if (nrows + 1 >= table->alloc_rows) { while (nrows + 1 >= table->alloc_rows) { if (table->alloc_rows == 0) table->alloc_rows = 16; else table->alloc_rows *= 2; } table->values = g_renew (guint32, table->values, (table->alloc_rows) * table->columns); } } static void make_room_in_stream (MonoDynamicStream *stream, int size) { if (size <= stream->alloc_size) return; while (stream->alloc_size <= size) { if (stream->alloc_size < 4096) stream->alloc_size = 4096; else stream->alloc_size *= 2; } stream->data = g_realloc (stream->data, stream->alloc_size); } static guint32 string_heap_insert (MonoDynamicStream *sh, const char *str) { guint32 idx; guint32 len; gpointer oldkey, oldval; if (g_hash_table_lookup_extended (sh->hash, str, &oldkey, &oldval)) return GPOINTER_TO_UINT (oldval); len = strlen (str) + 1; idx = sh->index; make_room_in_stream (sh, idx + len); /* * We strdup the string even if we already copy them in sh->data * so that the string pointers in the hash remain valid even if * we need to realloc sh->data. We may want to avoid that later. */ g_hash_table_insert (sh->hash, g_strdup (str), GUINT_TO_POINTER (idx)); memcpy (sh->data + idx, str, len); sh->index += len; return idx; } static guint32 string_heap_insert_mstring (MonoDynamicStream *sh, MonoString *str) { char *name = mono_string_to_utf8 (str); guint32 idx; idx = string_heap_insert (sh, name); g_free (name); return idx; } #ifndef DISABLE_REFLECTION_EMIT static void string_heap_init (MonoDynamicStream *sh) { sh->index = 0; sh->alloc_size = 4096; sh->data = g_malloc (4096); sh->hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); string_heap_insert (sh, ""); } #endif static guint32 mono_image_add_stream_data (MonoDynamicStream *stream, const char *data, guint32 len) { guint32 idx; make_room_in_stream (stream, stream->index + len); memcpy (stream->data + stream->index, data, len); idx = stream->index; stream->index += len; /* * align index? Not without adding an additional param that controls it since * we may store a blob value in pieces. 
*/ return idx; } static guint32 mono_image_add_stream_zero (MonoDynamicStream *stream, guint32 len) { guint32 idx; make_room_in_stream (stream, stream->index + len); memset (stream->data + stream->index, 0, len); idx = stream->index; stream->index += len; return idx; } static void stream_data_align (MonoDynamicStream *stream) { char buf [4] = {0}; guint32 count = stream->index % 4; /* we assume the stream data will be aligned */ if (count) mono_image_add_stream_data (stream, buf, 4 - count); } #ifndef DISABLE_REFLECTION_EMIT static int mono_blob_entry_hash (const char* str) { guint len, h; const char *end; len = mono_metadata_decode_blob_size (str, &str); if (len > 0) { end = str + len; h = *str; for (str += 1; str < end; str++) h = (h << 5) - h + *str; return h; } else { return 0; } } static gboolean mono_blob_entry_equal (const char *str1, const char *str2) { int len, len2; const char *end1; const char *end2; len = mono_metadata_decode_blob_size (str1, &end1); len2 = mono_metadata_decode_blob_size (str2, &end2); if (len != len2) return 0; return memcmp (end1, end2, len) == 0; } #endif static guint32 add_to_blob_cached (MonoDynamicImage *assembly, char *b1, int s1, char *b2, int s2) { guint32 idx; char *copy; gpointer oldkey, oldval; copy = g_malloc (s1+s2); memcpy (copy, b1, s1); memcpy (copy + s1, b2, s2); if (g_hash_table_lookup_extended (assembly->blob_cache, copy, &oldkey, &oldval)) { g_free (copy); idx = GPOINTER_TO_UINT (oldval); } else { idx = mono_image_add_stream_data (&assembly->blob, b1, s1); mono_image_add_stream_data (&assembly->blob, b2, s2); g_hash_table_insert (assembly->blob_cache, copy, GUINT_TO_POINTER (idx)); } return idx; } static guint32 sigbuffer_add_to_blob_cached (MonoDynamicImage *assembly, SigBuffer *buf) { char blob_size [8]; char *b = blob_size; guint32 size = buf->p - buf->buf; /* store length */ g_assert (size <= (buf->end - buf->buf)); mono_metadata_encode_value (size, b, &b); return add_to_blob_cached (assembly, blob_size, b-blob_size, buf->buf, size); } /* * Copy len * nelem bytes from val to dest, swapping bytes to LE if necessary. * dest may be misaligned. 
*/ static void swap_with_size (char *dest, const char* val, int len, int nelem) { #if G_BYTE_ORDER != G_LITTLE_ENDIAN int elem; for (elem = 0; elem < nelem; ++elem) { switch (len) { case 1: *dest = *val; break; case 2: dest [0] = val [1]; dest [1] = val [0]; break; case 4: dest [0] = val [3]; dest [1] = val [2]; dest [2] = val [1]; dest [3] = val [0]; break; case 8: dest [0] = val [7]; dest [1] = val [6]; dest [2] = val [5]; dest [3] = val [4]; dest [4] = val [3]; dest [5] = val [2]; dest [6] = val [1]; dest [7] = val [0]; break; default: g_assert_not_reached (); } dest += len; val += len; } #else memcpy (dest, val, len * nelem); #endif } static guint32 add_mono_string_to_blob_cached (MonoDynamicImage *assembly, MonoString *str) { char blob_size [64]; char *b = blob_size; guint32 idx = 0, len; len = str->length * 2; mono_metadata_encode_value (len, b, &b); #if G_BYTE_ORDER != G_LITTLE_ENDIAN { char *swapped = g_malloc (2 * mono_string_length (str)); const char *p = (const char*)mono_string_chars (str); swap_with_size (swapped, p, 2, mono_string_length (str)); idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len); g_free (swapped); } #else idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len); #endif return idx; } #ifndef DISABLE_REFLECTION_EMIT static MonoClass * default_class_from_mono_type (MonoType *type) { switch (type->type) { case MONO_TYPE_OBJECT: return mono_defaults.object_class; case MONO_TYPE_VOID: return mono_defaults.void_class; case MONO_TYPE_BOOLEAN: return mono_defaults.boolean_class; case MONO_TYPE_CHAR: return mono_defaults.char_class; case MONO_TYPE_I1: return mono_defaults.sbyte_class; case MONO_TYPE_U1: return mono_defaults.byte_class; case MONO_TYPE_I2: return mono_defaults.int16_class; case MONO_TYPE_U2: return mono_defaults.uint16_class; case MONO_TYPE_I4: return mono_defaults.int32_class; case MONO_TYPE_U4: return mono_defaults.uint32_class; case MONO_TYPE_I: return mono_defaults.int_class; case MONO_TYPE_U: return mono_defaults.uint_class; case MONO_TYPE_I8: return mono_defaults.int64_class; case MONO_TYPE_U8: return mono_defaults.uint64_class; case MONO_TYPE_R4: return mono_defaults.single_class; case MONO_TYPE_R8: return mono_defaults.double_class; case MONO_TYPE_STRING: return mono_defaults.string_class; default: g_warning ("default_class_from_mono_type: implement me 0x%02x\n", type->type); g_assert_not_reached (); } return NULL; } #endif static void encode_generic_class (MonoDynamicImage *assembly, MonoGenericClass *gclass, SigBuffer *buf) { int i; MonoGenericInst *class_inst; MonoClass *klass; g_assert (gclass); class_inst = gclass->context.class_inst; sigbuffer_add_value (buf, MONO_TYPE_GENERICINST); klass = gclass->container_class; sigbuffer_add_value (buf, klass->byval_arg.type); sigbuffer_add_value (buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE)); sigbuffer_add_value (buf, class_inst->type_argc); for (i = 0; i < class_inst->type_argc; ++i) encode_type (assembly, class_inst->type_argv [i], buf); } static void encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf) { if (!type) { g_assert_not_reached (); return; } if (type->byref) sigbuffer_add_value (buf, MONO_TYPE_BYREF); switch (type->type){ case MONO_TYPE_VOID: case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R4: case MONO_TYPE_R8: case 
MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_STRING: case MONO_TYPE_OBJECT: case MONO_TYPE_TYPEDBYREF: sigbuffer_add_value (buf, type->type); break; case MONO_TYPE_PTR: sigbuffer_add_value (buf, type->type); encode_type (assembly, type->data.type, buf); break; case MONO_TYPE_SZARRAY: sigbuffer_add_value (buf, type->type); encode_type (assembly, &type->data.klass->byval_arg, buf); break; case MONO_TYPE_VALUETYPE: case MONO_TYPE_CLASS: { MonoClass *k = mono_class_from_mono_type (type); if (k->generic_container) { MonoGenericClass *gclass = mono_metadata_lookup_generic_class (k, k->generic_container->context.class_inst, TRUE); encode_generic_class (assembly, gclass, buf); } else { /* * Make sure we use the correct type. */ sigbuffer_add_value (buf, k->byval_arg.type); /* * ensure only non-byref gets passed to mono_image_typedef_or_ref(), * otherwise two typerefs could point to the same type, leading to * verification errors. */ sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, &k->byval_arg)); } break; } case MONO_TYPE_ARRAY: sigbuffer_add_value (buf, type->type); encode_type (assembly, &type->data.array->eklass->byval_arg, buf); sigbuffer_add_value (buf, type->data.array->rank); sigbuffer_add_value (buf, 0); /* FIXME: set to 0 for now */ sigbuffer_add_value (buf, 0); break; case MONO_TYPE_GENERICINST: encode_generic_class (assembly, type->data.generic_class, buf); break; case MONO_TYPE_VAR: case MONO_TYPE_MVAR: sigbuffer_add_value (buf, type->type); sigbuffer_add_value (buf, mono_type_get_generic_param_num (type)); break; default: g_error ("need to encode type %x", type->type); } } static void encode_reflection_type (MonoDynamicImage *assembly, MonoReflectionType *type, SigBuffer *buf) { if (!type) { sigbuffer_add_value (buf, MONO_TYPE_VOID); return; } encode_type (assembly, mono_reflection_type_get_handle (type), buf); } static void encode_custom_modifiers (MonoDynamicImage *assembly, MonoArray *modreq, MonoArray *modopt, SigBuffer *buf) { int i; if (modreq) { for (i = 0; i < mono_array_length (modreq); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modreq, i); sigbuffer_add_byte (buf, MONO_TYPE_CMOD_REQD); sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod)); } } if (modopt) { for (i = 0; i < mono_array_length (modopt); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modopt, i); sigbuffer_add_byte (buf, MONO_TYPE_CMOD_OPT); sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod)); } } } #ifndef DISABLE_REFLECTION_EMIT static guint32 method_encode_signature (MonoDynamicImage *assembly, MonoMethodSignature *sig) { SigBuffer buf; int i; guint32 nparams = sig->param_count; guint32 idx; if (!assembly->save) return 0; sigbuffer_init (&buf, 32); /* * FIXME: vararg, explicit_this, differenc call_conv values... */ idx = sig->call_convention; if (sig->hasthis) idx |= 0x20; /* hasthis */ if (sig->generic_param_count) idx |= 0x10; /* generic */ sigbuffer_add_byte (&buf, idx); if (sig->generic_param_count) sigbuffer_add_value (&buf, sig->generic_param_count); sigbuffer_add_value (&buf, nparams); encode_type (assembly, sig->ret, &buf); for (i = 0; i < nparams; ++i) { if (i == sig->sentinelpos) sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL); encode_type (assembly, sig->params [i], &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } #endif static guint32 method_builder_encode_signature (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb) { /* * FIXME: reuse code from method_encode_signature(). 
*/ SigBuffer buf; int i; guint32 nparams = mb->parameters ? mono_array_length (mb->parameters): 0; guint32 ngparams = mb->generic_params ? mono_array_length (mb->generic_params): 0; guint32 notypes = mb->opt_types ? mono_array_length (mb->opt_types): 0; guint32 idx; sigbuffer_init (&buf, 32); /* LAMESPEC: all the call conv spec is foobared */ idx = mb->call_conv & 0x60; /* has-this, explicit-this */ if (mb->call_conv & 2) idx |= 0x5; /* vararg */ if (!(mb->attrs & METHOD_ATTRIBUTE_STATIC)) idx |= 0x20; /* hasthis */ if (ngparams) idx |= 0x10; /* generic */ sigbuffer_add_byte (&buf, idx); if (ngparams) sigbuffer_add_value (&buf, ngparams); sigbuffer_add_value (&buf, nparams + notypes); encode_custom_modifiers (assembly, mb->return_modreq, mb->return_modopt, &buf); encode_reflection_type (assembly, mb->rtype, &buf); for (i = 0; i < nparams; ++i) { MonoArray *modreq = NULL; MonoArray *modopt = NULL; MonoReflectionType *pt; if (mb->param_modreq && (i < mono_array_length (mb->param_modreq))) modreq = mono_array_get (mb->param_modreq, MonoArray*, i); if (mb->param_modopt && (i < mono_array_length (mb->param_modopt))) modopt = mono_array_get (mb->param_modopt, MonoArray*, i); encode_custom_modifiers (assembly, modreq, modopt, &buf); pt = mono_array_get (mb->parameters, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } if (notypes) sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL); for (i = 0; i < notypes; ++i) { MonoReflectionType *pt; pt = mono_array_get (mb->opt_types, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 encode_locals (MonoDynamicImage *assembly, MonoReflectionILGen *ilgen) { MonoDynamicTable *table; guint32 *values; guint32 idx, sig_idx; guint nl = mono_array_length (ilgen->locals); SigBuffer buf; int i; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x07); sigbuffer_add_value (&buf, nl); for (i = 0; i < nl; ++i) { MonoReflectionLocalBuilder *lb = mono_array_get (ilgen->locals, MonoReflectionLocalBuilder*, i); if (lb->is_pinned) sigbuffer_add_value (&buf, MONO_TYPE_PINNED); encode_reflection_type (assembly, (MonoReflectionType*)lb->type, &buf); } sig_idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); if (assembly->standalonesig_cache == NULL) assembly->standalonesig_cache = g_hash_table_new (NULL, NULL); idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx))); if (idx) return idx; table = &assembly->tables [MONO_TABLE_STANDALONESIG]; idx = table->next_idx ++; table->rows ++; alloc_table (table, table->rows); values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE; values [MONO_STAND_ALONE_SIGNATURE] = sig_idx; g_hash_table_insert (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx), GUINT_TO_POINTER (idx)); return idx; } static guint32 method_count_clauses (MonoReflectionILGen *ilgen) { guint32 num_clauses = 0; int i; MonoILExceptionInfo *ex_info; for (i = 0; i < mono_array_length (ilgen->ex_handlers); ++i) { ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i); if (ex_info->handlers) num_clauses += mono_array_length (ex_info->handlers); else num_clauses++; } return num_clauses; } #ifndef DISABLE_REFLECTION_EMIT static MonoExceptionClause* method_encode_clauses (MonoImage *image, MonoDynamicImage *assembly, MonoReflectionILGen *ilgen, guint32 num_clauses) { MonoExceptionClause *clauses; MonoExceptionClause 
*clause; MonoILExceptionInfo *ex_info; MonoILExceptionBlock *ex_block; guint32 finally_start; int i, j, clause_index;; clauses = image_g_new0 (image, MonoExceptionClause, num_clauses); clause_index = 0; for (i = mono_array_length (ilgen->ex_handlers) - 1; i >= 0; --i) { ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i); finally_start = ex_info->start + ex_info->len; if (!ex_info->handlers) continue; for (j = 0; j < mono_array_length (ex_info->handlers); ++j) { ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j); clause = &(clauses [clause_index]); clause->flags = ex_block->type; clause->try_offset = ex_info->start; if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY) clause->try_len = finally_start - ex_info->start; else clause->try_len = ex_info->len; clause->handler_offset = ex_block->start; clause->handler_len = ex_block->len; if (ex_block->extype) { clause->data.catch_class = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype)); } else { if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER) clause->data.filter_offset = ex_block->filter_offset; else clause->data.filter_offset = 0; } finally_start = ex_block->start + ex_block->len; clause_index ++; } } return clauses; } #endif /* !DISABLE_REFLECTION_EMIT */ static guint32 method_encode_code (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb) { char flags = 0; guint32 idx; guint32 code_size; gint32 max_stack, i; gint32 num_locals = 0; gint32 num_exception = 0; gint maybe_small; guint32 fat_flags; char fat_header [12]; guint32 int_value; guint16 short_value; guint32 local_sig = 0; guint32 header_size = 12; MonoArray *code; if ((mb->attrs & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT)) || (mb->iattrs & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME))) return 0; /*if (mb->name) g_print ("Encode method %s\n", mono_string_to_utf8 (mb->name));*/ if (mb->ilgen) { code = mb->ilgen->code; code_size = mb->ilgen->code_len; max_stack = mb->ilgen->max_stack; num_locals = mb->ilgen->locals ? mono_array_length (mb->ilgen->locals) : 0; if (mb->ilgen->ex_handlers) num_exception = method_count_clauses (mb->ilgen); } else { code = mb->code; if (code == NULL){ char *name = mono_string_to_utf8 (mb->name); char *str = g_strdup_printf ("Method %s does not have any IL associated", name); MonoException *exception = mono_get_exception_argument (NULL, "a method does not have any IL associated"); g_free (str); g_free (name); mono_raise_exception (exception); } code_size = mono_array_length (code); max_stack = 8; /* we probably need to run a verifier on the code... 
*/ } stream_data_align (&assembly->code); /* check for exceptions, maxstack, locals */ maybe_small = (max_stack <= 8) && (!num_locals) && (!num_exception); if (maybe_small) { if (code_size < 64 && !(code_size & 1)) { flags = (code_size << 2) | 0x2; } else if (code_size < 32 && (code_size & 1)) { flags = (code_size << 2) | 0x6; /* LAMESPEC: see metadata.c */ } else { goto fat_header; } idx = mono_image_add_stream_data (&assembly->code, &flags, 1); /* add to the fixup todo list */ if (mb->ilgen && mb->ilgen->num_token_fixups) mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 1)); mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size); return assembly->text_rva + idx; } fat_header: if (num_locals) local_sig = MONO_TOKEN_SIGNATURE | encode_locals (assembly, mb->ilgen); /* * FIXME: need to set also the header size in fat_flags. * (and more sects and init locals flags) */ fat_flags = 0x03; if (num_exception) fat_flags |= METHOD_HEADER_MORE_SECTS; if (mb->init_locals) fat_flags |= METHOD_HEADER_INIT_LOCALS; fat_header [0] = fat_flags; fat_header [1] = (header_size / 4 ) << 4; short_value = GUINT16_TO_LE (max_stack); memcpy (fat_header + 2, &short_value, 2); int_value = GUINT32_TO_LE (code_size); memcpy (fat_header + 4, &int_value, 4); int_value = GUINT32_TO_LE (local_sig); memcpy (fat_header + 8, &int_value, 4); idx = mono_image_add_stream_data (&assembly->code, fat_header, 12); /* add to the fixup todo list */ if (mb->ilgen && mb->ilgen->num_token_fixups) mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 12)); mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size); if (num_exception) { unsigned char sheader [4]; MonoILExceptionInfo * ex_info; MonoILExceptionBlock * ex_block; int j; stream_data_align (&assembly->code); /* always use fat format for now */ sheader [0] = METHOD_HEADER_SECTION_FAT_FORMAT | METHOD_HEADER_SECTION_EHTABLE; num_exception *= 6 * sizeof (guint32); num_exception += 4; /* include the size of the header */ sheader [1] = num_exception & 0xff; sheader [2] = (num_exception >> 8) & 0xff; sheader [3] = (num_exception >> 16) & 0xff; mono_image_add_stream_data (&assembly->code, (char*)sheader, 4); /* fat header, so we are already aligned */ /* reverse order */ for (i = mono_array_length (mb->ilgen->ex_handlers) - 1; i >= 0; --i) { ex_info = (MonoILExceptionInfo *)mono_array_addr (mb->ilgen->ex_handlers, MonoILExceptionInfo, i); if (ex_info->handlers) { int finally_start = ex_info->start + ex_info->len; for (j = 0; j < mono_array_length (ex_info->handlers); ++j) { guint32 val; ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j); /* the flags */ val = GUINT32_TO_LE (ex_block->type); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* try offset */ val = GUINT32_TO_LE (ex_info->start); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* need fault, too, probably */ if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY) val = GUINT32_TO_LE (finally_start - ex_info->start); else val = GUINT32_TO_LE (ex_info->len); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* handler offset */ val = GUINT32_TO_LE (ex_block->start); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* handler len */ val = GUINT32_TO_LE (ex_block->len); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); 
finally_start = ex_block->start + ex_block->len; if (ex_block->extype) { val = mono_metadata_token_from_dor (mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype))); } else { if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER) val = ex_block->filter_offset; else val = 0; } val = GUINT32_TO_LE (val); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /*g_print ("out clause %d: from %d len=%d, handler at %d, %d, finally_start=%d, ex_info->start=%d, ex_info->len=%d, ex_block->type=%d, j=%d, i=%d\n", clause.flags, clause.try_offset, clause.try_len, clause.handler_offset, clause.handler_len, finally_start, ex_info->start, ex_info->len, ex_block->type, j, i);*/ } } else { g_error ("No clauses for ex info block %d", i); } } } return assembly->text_rva + idx; } static guint32 find_index_in_table (MonoDynamicImage *assembly, int table_idx, int col, guint32 token) { int i; MonoDynamicTable *table; guint32 *values; table = &assembly->tables [table_idx]; g_assert (col < table->columns); values = table->values + table->columns; for (i = 1; i <= table->rows; ++i) { if (values [col] == token) return i; values += table->columns; } return 0; } /* * LOCKING: Acquires the loader lock. */ static MonoCustomAttrInfo* lookup_custom_attr (MonoImage *image, gpointer member) { MonoCustomAttrInfo* res; res = mono_image_property_lookup (image, member, MONO_PROP_DYNAMIC_CATTR); if (!res) return NULL; return g_memdup (res, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * res->num_attrs); } static gboolean custom_attr_visible (MonoImage *image, MonoReflectionCustomAttr *cattr) { /* FIXME: Need to do more checks */ if (cattr->ctor->method && (cattr->ctor->method->klass->image != image)) { int visibility = cattr->ctor->method->klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK; if ((visibility != TYPE_ATTRIBUTE_PUBLIC) && (visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC)) return FALSE; } return TRUE; } static MonoCustomAttrInfo* mono_custom_attrs_from_builders (MonoImage *alloc_img, MonoImage *image, MonoArray *cattrs) { int i, index, count, not_visible; MonoCustomAttrInfo *ainfo; MonoReflectionCustomAttr *cattr; if (!cattrs) return NULL; /* FIXME: check in assembly the Run flag is set */ count = mono_array_length (cattrs); /* Skip nonpublic attributes since MS.NET seems to do the same */ /* FIXME: This needs to be done more globally */ not_visible = 0; for (i = 0; i < count; ++i) { cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i); if (!custom_attr_visible (image, cattr)) not_visible ++; } count -= not_visible; ainfo = image_g_malloc0 (alloc_img, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * count); ainfo->image = image; ainfo->num_attrs = count; ainfo->cached = alloc_img != NULL; index = 0; for (i = 0; i < count; ++i) { cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i); if (custom_attr_visible (image, cattr)) { unsigned char *saved = mono_image_alloc (image, mono_array_length (cattr->data)); memcpy (saved, mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data)); ainfo->attrs [index].ctor = cattr->ctor->method; ainfo->attrs [index].data = saved; ainfo->attrs [index].data_size = mono_array_length (cattr->data); index ++; } } return ainfo; } #ifndef DISABLE_REFLECTION_EMIT /* * LOCKING: Acquires the loader lock. 
*/ static void mono_save_custom_attrs (MonoImage *image, void *obj, MonoArray *cattrs) { MonoCustomAttrInfo *ainfo, *tmp; if (!cattrs || !mono_array_length (cattrs)) return; ainfo = mono_custom_attrs_from_builders (image, image, cattrs); mono_loader_lock (); tmp = mono_image_property_lookup (image, obj, MONO_PROP_DYNAMIC_CATTR); if (tmp) mono_custom_attrs_free (tmp); mono_image_property_insert (image, obj, MONO_PROP_DYNAMIC_CATTR, ainfo); mono_loader_unlock (); } #endif void mono_custom_attrs_free (MonoCustomAttrInfo *ainfo) { if (!ainfo->cached) g_free (ainfo); } /* * idx is the table index of the object * type is one of MONO_CUSTOM_ATTR_* */ static void mono_image_add_cattrs (MonoDynamicImage *assembly, guint32 idx, guint32 type, MonoArray *cattrs) { MonoDynamicTable *table; MonoReflectionCustomAttr *cattr; guint32 *values; guint32 count, i, token; char blob_size [6]; char *p = blob_size; /* it is legal to pass a NULL cattrs: we avoid to use the if in a lot of places */ if (!cattrs) return; count = mono_array_length (cattrs); table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE]; table->rows += count; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_CUSTOM_ATTR_SIZE; idx <<= MONO_CUSTOM_ATTR_BITS; idx |= type; for (i = 0; i < count; ++i) { cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i); values [MONO_CUSTOM_ATTR_PARENT] = idx; token = mono_image_create_token (assembly, (MonoObject*)cattr->ctor, FALSE, FALSE); type = mono_metadata_token_index (token); type <<= MONO_CUSTOM_ATTR_TYPE_BITS; switch (mono_metadata_token_table (token)) { case MONO_TABLE_METHOD: type |= MONO_CUSTOM_ATTR_TYPE_METHODDEF; break; case MONO_TABLE_MEMBERREF: type |= MONO_CUSTOM_ATTR_TYPE_MEMBERREF; break; default: g_warning ("got wrong token in custom attr"); continue; } values [MONO_CUSTOM_ATTR_TYPE] = type; p = blob_size; mono_metadata_encode_value (mono_array_length (cattr->data), p, &p); values [MONO_CUSTOM_ATTR_VALUE] = add_to_blob_cached (assembly, blob_size, p - blob_size, mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data)); values += MONO_CUSTOM_ATTR_SIZE; ++table->next_idx; } } static void mono_image_add_decl_security (MonoDynamicImage *assembly, guint32 parent_token, MonoArray *permissions) { MonoDynamicTable *table; guint32 *values; guint32 count, i, idx; MonoReflectionPermissionSet *perm; if (!permissions) return; count = mono_array_length (permissions); table = &assembly->tables [MONO_TABLE_DECLSECURITY]; table->rows += count; alloc_table (table, table->rows); for (i = 0; i < mono_array_length (permissions); ++i) { perm = (MonoReflectionPermissionSet*)mono_array_addr (permissions, MonoReflectionPermissionSet, i); values = table->values + table->next_idx * MONO_DECL_SECURITY_SIZE; idx = mono_metadata_token_index (parent_token); idx <<= MONO_HAS_DECL_SECURITY_BITS; switch (mono_metadata_token_table (parent_token)) { case MONO_TABLE_TYPEDEF: idx |= MONO_HAS_DECL_SECURITY_TYPEDEF; break; case MONO_TABLE_METHOD: idx |= MONO_HAS_DECL_SECURITY_METHODDEF; break; case MONO_TABLE_ASSEMBLY: idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY; break; default: g_assert_not_reached (); } values [MONO_DECL_SECURITY_ACTION] = perm->action; values [MONO_DECL_SECURITY_PARENT] = idx; values [MONO_DECL_SECURITY_PERMISSIONSET] = add_mono_string_to_blob_cached (assembly, perm->pset); ++table->next_idx; } } /* * Fill in the MethodDef and ParamDef tables for a method. * This is used for both normal methods and constructors. 
*/ static void mono_image_basic_method (ReflectionMethodBuilder *mb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint i, count; /* room in this table is already allocated */ table = &assembly->tables [MONO_TABLE_METHOD]; *mb->table_idx = table->next_idx ++; g_hash_table_insert (assembly->method_to_table_idx, mb->mhandle, GUINT_TO_POINTER ((*mb->table_idx))); values = table->values + *mb->table_idx * MONO_METHOD_SIZE; values [MONO_METHOD_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name); values [MONO_METHOD_FLAGS] = mb->attrs; values [MONO_METHOD_IMPLFLAGS] = mb->iattrs; values [MONO_METHOD_SIGNATURE] = method_builder_encode_signature (assembly, mb); values [MONO_METHOD_RVA] = method_encode_code (assembly, mb); table = &assembly->tables [MONO_TABLE_PARAM]; values [MONO_METHOD_PARAMLIST] = table->next_idx; mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_METHOD, *mb->table_idx), mb->permissions); if (mb->pinfo) { MonoDynamicTable *mtable; guint32 *mvalues; mtable = &assembly->tables [MONO_TABLE_FIELDMARSHAL]; mvalues = mtable->values + mtable->next_idx * MONO_FIELD_MARSHAL_SIZE; count = 0; for (i = 0; i < mono_array_length (mb->pinfo); ++i) { if (mono_array_get (mb->pinfo, gpointer, i)) count++; } table->rows += count; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_PARAM_SIZE; for (i = 0; i < mono_array_length (mb->pinfo); ++i) { MonoReflectionParamBuilder *pb; if ((pb = mono_array_get (mb->pinfo, MonoReflectionParamBuilder*, i))) { values [MONO_PARAM_FLAGS] = pb->attrs; values [MONO_PARAM_SEQUENCE] = i; if (pb->name != NULL) { values [MONO_PARAM_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name); } else { values [MONO_PARAM_NAME] = 0; } values += MONO_PARAM_SIZE; if (pb->marshal_info) { mtable->rows++; alloc_table (mtable, mtable->rows); mvalues = mtable->values + mtable->rows * MONO_FIELD_MARSHAL_SIZE; mvalues [MONO_FIELD_MARSHAL_PARENT] = (table->next_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_PARAMDEF; mvalues [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, pb->marshal_info); } pb->table_idx = table->next_idx++; if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) { guint32 field_type = 0; mtable = &assembly->tables [MONO_TABLE_CONSTANT]; mtable->rows ++; alloc_table (mtable, mtable->rows); mvalues = mtable->values + mtable->rows * MONO_CONSTANT_SIZE; mvalues [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PARAM | (pb->table_idx << MONO_HASCONSTANT_BITS); mvalues [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type); mvalues [MONO_CONSTANT_TYPE] = field_type; mvalues [MONO_CONSTANT_PADDING] = 0; } } } } } #ifndef DISABLE_REFLECTION_EMIT static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb) { memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mono_reflection_type_resolve_user_types ((MonoReflectionType*)mb->rtype); rmb->parameters = mb->parameters; rmb->generic_params = mb->generic_params; rmb->generic_container = mb->generic_container; rmb->opt_types = NULL; rmb->pinfo = mb->pinfo; rmb->attrs = mb->attrs; rmb->iattrs = mb->iattrs; rmb->call_conv = mb->call_conv; rmb->code = mb->code; rmb->type = mb->type; rmb->name = mb->name; rmb->table_idx = &mb->table_idx; rmb->init_locals = mb->init_locals; rmb->skip_visibility = FALSE; rmb->return_modreq = mb->return_modreq; rmb->return_modopt = mb->return_modopt; rmb->param_modreq = 
mb->param_modreq; rmb->param_modopt = mb->param_modopt; rmb->permissions = mb->permissions; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; if (mb->dll) { rmb->charset = mb->charset; rmb->extra_flags = mb->extra_flags; rmb->native_cc = mb->native_cc; rmb->dllentry = mb->dllentry; rmb->dll = mb->dll; } } static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb) { const char *name = mb->attrs & METHOD_ATTRIBUTE_STATIC ? ".cctor": ".ctor"; memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mono_type_get_object (mono_domain_get (), &mono_defaults.void_class->byval_arg); rmb->parameters = mb->parameters; rmb->generic_params = NULL; rmb->generic_container = NULL; rmb->opt_types = NULL; rmb->pinfo = mb->pinfo; rmb->attrs = mb->attrs; rmb->iattrs = mb->iattrs; rmb->call_conv = mb->call_conv; rmb->code = NULL; rmb->type = mb->type; rmb->name = mono_string_new (mono_domain_get (), name); rmb->table_idx = &mb->table_idx; rmb->init_locals = mb->init_locals; rmb->skip_visibility = FALSE; rmb->return_modreq = NULL; rmb->return_modopt = NULL; rmb->param_modreq = mb->param_modreq; rmb->param_modopt = mb->param_modopt; rmb->permissions = mb->permissions; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; } static void reflection_methodbuilder_from_dynamic_method (ReflectionMethodBuilder *rmb, MonoReflectionDynamicMethod *mb) { memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mb->rtype; rmb->parameters = mb->parameters; rmb->generic_params = NULL; rmb->generic_container = NULL; rmb->opt_types = NULL; rmb->pinfo = NULL; rmb->attrs = mb->attrs; rmb->iattrs = 0; rmb->call_conv = mb->call_conv; rmb->code = NULL; rmb->type = (MonoObject *) mb->owner; rmb->name = mb->name; rmb->table_idx = NULL; rmb->init_locals = mb->init_locals; rmb->skip_visibility = mb->skip_visibility; rmb->return_modreq = NULL; rmb->return_modopt = NULL; rmb->param_modreq = NULL; rmb->param_modopt = NULL; rmb->permissions = NULL; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; } #endif static void mono_image_add_methodimpl (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type; MonoDynamicTable *table; guint32 *values; guint32 tok; if (!mb->override_method) return; table = &assembly->tables [MONO_TABLE_METHODIMPL]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_METHODIMPL_SIZE; values [MONO_METHODIMPL_CLASS] = tb->table_idx; values [MONO_METHODIMPL_BODY] = MONO_METHODDEFORREF_METHODDEF | (mb->table_idx << MONO_METHODDEFORREF_BITS); tok = mono_image_create_token (assembly, (MonoObject*)mb->override_method, FALSE, FALSE); switch (mono_metadata_token_table (tok)) { case MONO_TABLE_MEMBERREF: tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODREF; break; case MONO_TABLE_METHOD: tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODDEF; break; default: g_assert_not_reached (); } values [MONO_METHODIMPL_DECLARATION] = tok; } #ifndef DISABLE_REFLECTION_EMIT static void mono_image_get_method_info (MonoReflectionMethodBuilder *mb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; ReflectionMethodBuilder rmb; int i; reflection_methodbuilder_from_method_builder (&rmb, mb); mono_image_basic_method (&rmb, assembly); mb->table_idx = *rmb.table_idx; if 
(mb->dll) { /* It's a P/Invoke method */ guint32 moduleref; /* map CharSet values to on-disk values */ int ncharset = (mb->charset ? (mb->charset - 1) * 2 : 0); int extra_flags = mb->extra_flags; table = &assembly->tables [MONO_TABLE_IMPLMAP]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_IMPLMAP_SIZE; values [MONO_IMPLMAP_FLAGS] = (mb->native_cc << 8) | ncharset | extra_flags; values [MONO_IMPLMAP_MEMBER] = (mb->table_idx << 1) | 1; /* memberforwarded: method */ if (mb->dllentry) values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->dllentry); else values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name); moduleref = string_heap_insert_mstring (&assembly->sheap, mb->dll); if (!(values [MONO_IMPLMAP_SCOPE] = find_index_in_table (assembly, MONO_TABLE_MODULEREF, MONO_MODULEREF_NAME, moduleref))) { table = &assembly->tables [MONO_TABLE_MODULEREF]; table->rows ++; alloc_table (table, table->rows); table->values [table->rows * MONO_MODULEREF_SIZE + MONO_MODULEREF_NAME] = moduleref; values [MONO_IMPLMAP_SCOPE] = table->rows; } } if (mb->generic_params) { table = &assembly->tables [MONO_TABLE_GENERICPARAM]; table->rows += mono_array_length (mb->generic_params); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (mb->generic_params); ++i) { guint32 owner = MONO_TYPEORMETHOD_METHOD | (mb->table_idx << MONO_TYPEORMETHOD_BITS); mono_image_get_generic_param_info ( mono_array_get (mb->generic_params, gpointer, i), owner, assembly); } } } static void mono_image_get_ctor_info (MonoDomain *domain, MonoReflectionCtorBuilder *mb, MonoDynamicImage *assembly) { ReflectionMethodBuilder rmb; reflection_methodbuilder_from_ctor_builder (&rmb, mb); mono_image_basic_method (&rmb, assembly); mb->table_idx = *rmb.table_idx; } #endif static char* type_get_fully_qualified_name (MonoType *type) { return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED); } static char* type_get_qualified_name (MonoType *type, MonoAssembly *ass) { MonoClass *klass; MonoAssembly *ta; klass = mono_class_from_mono_type (type); if (!klass) return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION); ta = klass->image->assembly; if (ta->dynamic || (ta == ass)) { if (klass->generic_class || klass->generic_container) /* For generic type definitions, we want T, while REFLECTION returns T<K> */ return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_FULL_NAME); else return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION); } return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED); } #ifndef DISABLE_REFLECTION_EMIT /*field_image is the image to which the eventual custom mods have been encoded against*/ static guint32 fieldref_encode_signature (MonoDynamicImage *assembly, MonoImage *field_image, MonoType *type) { SigBuffer buf; guint32 idx, i, token; if (!assembly->save) return 0; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); /* encode custom attributes before the type */ if (type->num_mods) { for (i = 0; i < type->num_mods; ++i) { if (field_image) { MonoClass *class = mono_class_get (field_image, type->modifiers [i].token); g_assert (class); token = mono_image_typedef_or_ref (assembly, &class->byval_arg); } else { token = type->modifiers [i].token; } if (type->modifiers [i].required) sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_REQD); else sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_OPT); sigbuffer_add_value (&buf, token); } } encode_type 
(assembly, type, &buf); idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } #endif static guint32 field_encode_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb) { SigBuffer buf; guint32 idx; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); encode_custom_modifiers (assembly, fb->modreq, fb->modopt, &buf); /* encode custom attributes before the type */ encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf); idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type) { char blob_size [64]; char *b = blob_size; char *p, *box_val; char* buf; guint32 idx = 0, len = 0, dummy = 0; #ifdef ARM_FPU_FPA #if G_BYTE_ORDER == G_LITTLE_ENDIAN guint32 fpa_double [2]; guint32 *fpa_p; #endif #endif p = buf = g_malloc (64); if (!val) { *ret_type = MONO_TYPE_CLASS; len = 4; box_val = (char*)&dummy; } else { box_val = ((char*)val) + sizeof (MonoObject); *ret_type = val->vtable->klass->byval_arg.type; } handle_enum: switch (*ret_type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: case MONO_TYPE_I1: len = 1; break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: len = 2; break; case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: len = 4; break; case MONO_TYPE_U8: case MONO_TYPE_I8: len = 8; break; case MONO_TYPE_R8: len = 8; #ifdef ARM_FPU_FPA #if G_BYTE_ORDER == G_LITTLE_ENDIAN fpa_p = (guint32*)box_val; fpa_double [0] = fpa_p [1]; fpa_double [1] = fpa_p [0]; box_val = (char*)fpa_double; #endif #endif break; case MONO_TYPE_VALUETYPE: if (val->vtable->klass->enumtype) { *ret_type = mono_class_enum_basetype (val->vtable->klass)->type; goto handle_enum; } else g_error ("we can't encode valuetypes"); case MONO_TYPE_CLASS: break; case MONO_TYPE_STRING: { MonoString *str = (MonoString*)val; /* there is no signature */ len = str->length * 2; mono_metadata_encode_value (len, b, &b); #if G_BYTE_ORDER != G_LITTLE_ENDIAN { char *swapped = g_malloc (2 * mono_string_length (str)); const char *p = (const char*)mono_string_chars (str); swap_with_size (swapped, p, 2, mono_string_length (str)); idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len); g_free (swapped); } #else idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len); #endif g_free (buf); return idx; } case MONO_TYPE_GENERICINST: *ret_type = val->vtable->klass->generic_class->container_class->byval_arg.type; goto handle_enum; default: g_error ("we don't encode constant type 0x%02x yet", *ret_type); } /* there is no signature */ mono_metadata_encode_value (len, b, &b); #if G_BYTE_ORDER != G_LITTLE_ENDIAN idx = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size); swap_with_size (blob_size, box_val, len, 1); mono_image_add_stream_data (&assembly->blob, blob_size, len); #else idx = add_to_blob_cached (assembly, blob_size, b-blob_size, box_val, len); #endif g_free (buf); return idx; } static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo) { char *str; SigBuffer buf; guint32 idx, len; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, minfo->type); switch (minfo->type) { case MONO_NATIVE_BYVALTSTR: case MONO_NATIVE_BYVALARRAY: sigbuffer_add_value (&buf, minfo->count); break; case MONO_NATIVE_LPARRAY: if (minfo->eltype || minfo->has_size) { sigbuffer_add_value (&buf, minfo->eltype); if (minfo->has_size) { sigbuffer_add_value (&buf, 
minfo->param_num != -1? minfo->param_num: 0); sigbuffer_add_value (&buf, minfo->count != -1? minfo->count: 0); /* LAMESPEC: ElemMult is undocumented */ sigbuffer_add_value (&buf, minfo->param_num != -1? 1: 0); } } break; case MONO_NATIVE_SAFEARRAY: if (minfo->eltype) sigbuffer_add_value (&buf, minfo->eltype); break; case MONO_NATIVE_CUSTOM: if (minfo->guid) { str = mono_string_to_utf8 (minfo->guid); len = strlen (str); sigbuffer_add_value (&buf, len); sigbuffer_add_mem (&buf, str, len); g_free (str); } else { sigbuffer_add_value (&buf, 0); } /* native type name */ sigbuffer_add_value (&buf, 0); /* custom marshaler type name */ if (minfo->marshaltype || minfo->marshaltyperef) { if (minfo->marshaltyperef) str = type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef)); else str = mono_string_to_utf8 (minfo->marshaltype); len = strlen (str); sigbuffer_add_value (&buf, len); sigbuffer_add_mem (&buf, str, len); g_free (str); } else { /* FIXME: Actually a bug, since this field is required. Punting for now ... */ sigbuffer_add_value (&buf, 0); } if (minfo->mcookie) { str = mono_string_to_utf8 (minfo->mcookie); len = strlen (str); sigbuffer_add_value (&buf, len); sigbuffer_add_mem (&buf, str, len); g_free (str); } else { sigbuffer_add_value (&buf, 0); } break; default: break; } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static void mono_image_get_field_info (MonoReflectionFieldBuilder *fb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; /* maybe this fixup should be done in the C# code */ if (fb->attrs & FIELD_ATTRIBUTE_LITERAL) fb->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT; table = &assembly->tables [MONO_TABLE_FIELD]; fb->table_idx = table->next_idx ++; g_hash_table_insert (assembly->field_to_table_idx, fb->handle, GUINT_TO_POINTER (fb->table_idx)); values = table->values + fb->table_idx * MONO_FIELD_SIZE; values [MONO_FIELD_NAME] = string_heap_insert_mstring (&assembly->sheap, fb->name); values [MONO_FIELD_FLAGS] = fb->attrs; values [MONO_FIELD_SIGNATURE] = field_encode_signature (assembly, fb); if (fb->offset != -1) { table = &assembly->tables [MONO_TABLE_FIELDLAYOUT]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_FIELD_LAYOUT_SIZE; values [MONO_FIELD_LAYOUT_FIELD] = fb->table_idx; values [MONO_FIELD_LAYOUT_OFFSET] = fb->offset; } if (fb->attrs & FIELD_ATTRIBUTE_LITERAL) { guint32 field_type = 0; table = &assembly->tables [MONO_TABLE_CONSTANT]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_CONSTANT_SIZE; values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_FIEDDEF | (fb->table_idx << MONO_HASCONSTANT_BITS); values [MONO_CONSTANT_VALUE] = encode_constant (assembly, fb->def_value, &field_type); values [MONO_CONSTANT_TYPE] = field_type; values [MONO_CONSTANT_PADDING] = 0; } if (fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) { guint32 rva_idx; table = &assembly->tables [MONO_TABLE_FIELDRVA]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_FIELD_RVA_SIZE; values [MONO_FIELD_RVA_FIELD] = fb->table_idx; /* * We store it in the code section because it's simpler for now. 
*/ if (fb->rva_data) { if (mono_array_length (fb->rva_data) >= 10) stream_data_align (&assembly->code); rva_idx = mono_image_add_stream_data (&assembly->code, mono_array_addr (fb->rva_data, char, 0), mono_array_length (fb->rva_data)); } else rva_idx = mono_image_add_stream_zero (&assembly->code, mono_class_value_size (fb->handle->parent, NULL)); values [MONO_FIELD_RVA_RVA] = rva_idx + assembly->text_rva; } if (fb->marshal_info) { table = &assembly->tables [MONO_TABLE_FIELDMARSHAL]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_FIELD_MARSHAL_SIZE; values [MONO_FIELD_MARSHAL_PARENT] = (fb->table_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_FIELDSREF; values [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, fb->marshal_info); } } static guint32 property_encode_signature (MonoDynamicImage *assembly, MonoReflectionPropertyBuilder *fb) { SigBuffer buf; guint32 nparams = 0; MonoReflectionMethodBuilder *mb = fb->get_method; MonoReflectionMethodBuilder *smb = fb->set_method; guint32 idx, i; if (mb && mb->parameters) nparams = mono_array_length (mb->parameters); if (!mb && smb && smb->parameters) nparams = mono_array_length (smb->parameters) - 1; sigbuffer_init (&buf, 32); sigbuffer_add_byte (&buf, 0x08); sigbuffer_add_value (&buf, nparams); if (mb) { encode_reflection_type (assembly, (MonoReflectionType*)mb->rtype, &buf); for (i = 0; i < nparams; ++i) { MonoReflectionType *pt = mono_array_get (mb->parameters, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } } else if (smb && smb->parameters) { /* the property type is the last param */ encode_reflection_type (assembly, mono_array_get (smb->parameters, MonoReflectionType*, nparams), &buf); for (i = 0; i < nparams; ++i) { MonoReflectionType *pt = mono_array_get (smb->parameters, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } } else { encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static void mono_image_get_property_info (MonoReflectionPropertyBuilder *pb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint num_methods = 0; guint32 semaidx; /* * we need to set things in the following tables: * PROPERTYMAP (info already filled in _get_type_info ()) * PROPERTY (rows already preallocated in _get_type_info ()) * METHOD (method info already done with the generic method code) * METHODSEMANTICS */ table = &assembly->tables [MONO_TABLE_PROPERTY]; pb->table_idx = table->next_idx ++; values = table->values + pb->table_idx * MONO_PROPERTY_SIZE; values [MONO_PROPERTY_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name); values [MONO_PROPERTY_FLAGS] = pb->attrs; values [MONO_PROPERTY_TYPE] = property_encode_signature (assembly, pb); /* FIXME: we still don't handle 'other' methods */ if (pb->get_method) num_methods ++; if (pb->set_method) num_methods ++; table = &assembly->tables [MONO_TABLE_METHODSEMANTICS]; table->rows += num_methods; alloc_table (table, table->rows); if (pb->get_method) { semaidx = table->next_idx ++; values = table->values + semaidx * MONO_METHOD_SEMA_SIZE; values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_GETTER; values [MONO_METHOD_SEMA_METHOD] = pb->get_method->table_idx; values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY; } if (pb->set_method) { semaidx = table->next_idx ++; values = table->values + 
semaidx * MONO_METHOD_SEMA_SIZE; values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_SETTER; values [MONO_METHOD_SEMA_METHOD] = pb->set_method->table_idx; values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY; } } static void mono_image_get_event_info (MonoReflectionEventBuilder *eb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint num_methods = 0; guint32 semaidx; /* * we need to set things in the following tables: * EVENTMAP (info already filled in _get_type_info ()) * EVENT (rows already preallocated in _get_type_info ()) * METHOD (method info already done with the generic method code) * METHODSEMANTICS */ table = &assembly->tables [MONO_TABLE_EVENT]; eb->table_idx = table->next_idx ++; values = table->values + eb->table_idx * MONO_EVENT_SIZE; values [MONO_EVENT_NAME] = string_heap_insert_mstring (&assembly->sheap, eb->name); values [MONO_EVENT_FLAGS] = eb->attrs; values [MONO_EVENT_TYPE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (eb->type)); /* * FIXME: we still don't handle 'other' methods */ if (eb->add_method) num_methods ++; if (eb->remove_method) num_methods ++; if (eb->raise_method) num_methods ++; table = &assembly->tables [MONO_TABLE_METHODSEMANTICS]; table->rows += num_methods; alloc_table (table, table->rows); if (eb->add_method) { semaidx = table->next_idx ++; values = table->values + semaidx * MONO_METHOD_SEMA_SIZE; values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_ADD_ON; values [MONO_METHOD_SEMA_METHOD] = eb->add_method->table_idx; values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT; } if (eb->remove_method) { semaidx = table->next_idx ++; values = table->values + semaidx * MONO_METHOD_SEMA_SIZE; values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_REMOVE_ON; values [MONO_METHOD_SEMA_METHOD] = eb->remove_method->table_idx; values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT; } if (eb->raise_method) { semaidx = table->next_idx ++; values = table->values + semaidx * MONO_METHOD_SEMA_SIZE; values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_FIRE; values [MONO_METHOD_SEMA_METHOD] = eb->raise_method->table_idx; values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT; } } static void encode_constraints (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 num_constraints, i; guint32 *values; guint32 table_idx; table = &assembly->tables [MONO_TABLE_GENERICPARAMCONSTRAINT]; num_constraints = gparam->iface_constraints ? 
mono_array_length (gparam->iface_constraints) : 0; table->rows += num_constraints; if (gparam->base_type) table->rows++; alloc_table (table, table->rows); if (gparam->base_type) { table_idx = table->next_idx ++; values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE; values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner; values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref ( assembly, mono_reflection_type_get_handle (gparam->base_type)); } for (i = 0; i < num_constraints; i++) { MonoReflectionType *constraint = mono_array_get ( gparam->iface_constraints, gpointer, i); table_idx = table->next_idx ++; values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE; values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner; values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref ( assembly, mono_reflection_type_get_handle (constraint)); } } static void mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly) { GenericParamTableEntry *entry; /* * The GenericParam table must be sorted according to the `owner' field. * We need to do this sorting prior to writing the GenericParamConstraint * table, since we have to use the final GenericParam table indices there * and they must also be sorted. */ entry = g_new0 (GenericParamTableEntry, 1); entry->owner = owner; /* FIXME: track where gen_params should be freed and remove the GC root as well */ MOVING_GC_REGISTER (&entry->gparam); entry->gparam = gparam; g_ptr_array_add (assembly->gen_params, entry); } static void write_generic_param_entry (MonoDynamicImage *assembly, GenericParamTableEntry *entry) { MonoDynamicTable *table; MonoGenericParam *param; guint32 *values; guint32 table_idx; table = &assembly->tables [MONO_TABLE_GENERICPARAM]; table_idx = table->next_idx ++; values = table->values + table_idx * MONO_GENERICPARAM_SIZE; param = mono_reflection_type_get_handle ((MonoReflectionType*)entry->gparam)->data.generic_param; values [MONO_GENERICPARAM_OWNER] = entry->owner; values [MONO_GENERICPARAM_FLAGS] = entry->gparam->attrs; values [MONO_GENERICPARAM_NUMBER] = mono_generic_param_num (param); values [MONO_GENERICPARAM_NAME] = string_heap_insert (&assembly->sheap, mono_generic_param_info (param)->name); mono_image_add_cattrs (assembly, table_idx, MONO_CUSTOM_ATTR_GENERICPAR, entry->gparam->cattrs); encode_constraints (entry->gparam, table_idx, assembly); } static guint32 resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image) { MonoDynamicTable *table; guint32 token; guint32 *values; guint32 cols [MONO_ASSEMBLY_SIZE]; const char *pubkey; guint32 publen; if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, image)))) return token; if (image->assembly->dynamic && (image->assembly == assembly->image.assembly)) { table = &assembly->tables [MONO_TABLE_MODULEREF]; token = table->next_idx ++; table->rows ++; alloc_table (table, table->rows); values = table->values + token * MONO_MODULEREF_SIZE; values [MONO_MODULEREF_NAME] = string_heap_insert (&assembly->sheap, image->module_name); token <<= MONO_RESOLTION_SCOPE_BITS; token |= MONO_RESOLTION_SCOPE_MODULEREF; g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token)); return token; } if (image->assembly->dynamic) /* FIXME: */ memset (cols, 0, sizeof (cols)); else { /* image->assembly->image is the manifest module */ image = image->assembly->image; mono_metadata_decode_row (&image->tables [MONO_TABLE_ASSEMBLY], 0, cols, MONO_ASSEMBLY_SIZE); } table = &assembly->tables 
[MONO_TABLE_ASSEMBLYREF]; token = table->next_idx ++; table->rows ++; alloc_table (table, table->rows); values = table->values + token * MONO_ASSEMBLYREF_SIZE; values [MONO_ASSEMBLYREF_NAME] = string_heap_insert (&assembly->sheap, image->assembly_name); values [MONO_ASSEMBLYREF_MAJOR_VERSION] = cols [MONO_ASSEMBLY_MAJOR_VERSION]; values [MONO_ASSEMBLYREF_MINOR_VERSION] = cols [MONO_ASSEMBLY_MINOR_VERSION]; values [MONO_ASSEMBLYREF_BUILD_NUMBER] = cols [MONO_ASSEMBLY_BUILD_NUMBER]; values [MONO_ASSEMBLYREF_REV_NUMBER] = cols [MONO_ASSEMBLY_REV_NUMBER]; values [MONO_ASSEMBLYREF_FLAGS] = 0; values [MONO_ASSEMBLYREF_CULTURE] = 0; values [MONO_ASSEMBLYREF_HASH_VALUE] = 0; if (strcmp ("", image->assembly->aname.culture)) { values [MONO_ASSEMBLYREF_CULTURE] = string_heap_insert (&assembly->sheap, image->assembly->aname.culture); } if ((pubkey = mono_image_get_public_key (image, &publen))) { guchar pubtoken [9]; pubtoken [0] = 8; mono_digest_get_public_token (pubtoken + 1, (guchar*)pubkey, publen); values [MONO_ASSEMBLYREF_PUBLIC_KEY] = mono_image_add_stream_data (&assembly->blob, (char*)pubtoken, 9); } else { values [MONO_ASSEMBLYREF_PUBLIC_KEY] = 0; } token <<= MONO_RESOLTION_SCOPE_BITS; token |= MONO_RESOLTION_SCOPE_ASSEMBLYREF; g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token)); return token; } static guint32 create_typespec (MonoDynamicImage *assembly, MonoType *type) { MonoDynamicTable *table; guint32 *values; guint32 token; SigBuffer buf; if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type)))) return token; sigbuffer_init (&buf, 32); switch (type->type) { case MONO_TYPE_FNPTR: case MONO_TYPE_PTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_VAR: case MONO_TYPE_MVAR: case MONO_TYPE_GENERICINST: encode_type (assembly, type, &buf); break; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: { MonoClass *k = mono_class_from_mono_type (type); if (!k || !k->generic_container) { sigbuffer_free (&buf); return 0; } encode_type (assembly, type, &buf); break; } default: sigbuffer_free (&buf); return 0; } table = &assembly->tables [MONO_TABLE_TYPESPEC]; if (assembly->save) { token = sigbuffer_add_to_blob_cached (assembly, &buf); alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPESPEC_SIZE; values [MONO_TYPESPEC_SIGNATURE] = token; } sigbuffer_free (&buf); token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS); g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token)); table->next_idx ++; return token; } static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec) { MonoDynamicTable *table; guint32 *values; guint32 token, scope, enclosing; MonoClass *klass; /* if the type requires a typespec, we must try that first*/ if (try_typespec && (token = create_typespec (assembly, type))) return token; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typeref, type)); if (token) return token; klass = mono_class_from_mono_type (type); if (!klass) klass = mono_class_from_mono_type (type); /* * If it's in the same module and not a generic type parameter: */ if ((klass->image == &assembly->image) && (type->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) { MonoReflectionTypeBuilder *tb = klass->reflection_info; token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS); mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), klass->reflection_info); return token; } if 
(klass->nested_in) { enclosing = mono_image_typedef_or_ref_full (assembly, &klass->nested_in->byval_arg, FALSE); /* get the typeref idx of the enclosing type */ enclosing >>= MONO_TYPEDEFORREF_BITS; scope = (enclosing << MONO_RESOLTION_SCOPE_BITS) | MONO_RESOLTION_SCOPE_TYPEREF; } else { scope = resolution_scope_from_image (assembly, klass->image); } table = &assembly->tables [MONO_TABLE_TYPEREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPEREF_SIZE; values [MONO_TYPEREF_SCOPE] = scope; values [MONO_TYPEREF_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_TYPEREF_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); } token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */ g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token)); table->next_idx ++; mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), klass->reflection_info); return token; } /* * Despite the name, we handle also TypeSpec (with the above helper). */ static guint32 mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type) { return mono_image_typedef_or_ref_full (assembly, type, TRUE); } #ifndef DISABLE_REFLECTION_EMIT /* * Insert a memberef row into the metadata: the token that point to the memberref * is returned. Caching is done in the caller (mono_image_get_methodref_token() or * mono_image_get_fieldref_token()). * The sig param is an index to an already built signature. */ static guint32 mono_image_get_memberref_token (MonoDynamicImage *assembly, MonoType *type, const char *name, guint32 sig) { MonoDynamicTable *table; guint32 *values; guint32 token, pclass; guint32 parent; parent = mono_image_typedef_or_ref (assembly, type); switch (parent & MONO_TYPEDEFORREF_MASK) { case MONO_TYPEDEFORREF_TYPEREF: pclass = MONO_MEMBERREF_PARENT_TYPEREF; break; case MONO_TYPEDEFORREF_TYPESPEC: pclass = MONO_MEMBERREF_PARENT_TYPESPEC; break; case MONO_TYPEDEFORREF_TYPEDEF: pclass = MONO_MEMBERREF_PARENT_TYPEDEF; break; default: g_warning ("unknown typeref or def token 0x%08x for %s", parent, name); return 0; } /* extract the index */ parent >>= MONO_TYPEDEFORREF_BITS; table = &assembly->tables [MONO_TABLE_MEMBERREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_MEMBERREF_SIZE; values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS); values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name); values [MONO_MEMBERREF_SIGNATURE] = sig; } token = MONO_TOKEN_MEMBER_REF | table->next_idx; table->next_idx ++; return token; } static guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec) { guint32 token; MonoMethodSignature *sig; create_typespec = create_typespec && method->is_generic && method->klass->image != &assembly->image; if (create_typespec) { token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1))); if (token) return token; } token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method)); if (token && !create_typespec) return token; g_assert (!method->is_inflated); if (!token) { /* * A methodref signature can't contain an unmanaged calling convention. 
*/
		sig = mono_metadata_signature_dup (mono_method_signature (method));
		if ((sig->call_convention != MONO_CALL_DEFAULT) && (sig->call_convention != MONO_CALL_VARARG))
			sig->call_convention = MONO_CALL_DEFAULT;

		token = mono_image_get_memberref_token (assembly, &method->klass->byval_arg,
			method->name, method_encode_signature (assembly, sig));
		g_free (sig);
		g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
	}

	if (create_typespec) {
		MonoDynamicTable *table = &assembly->tables [MONO_TABLE_METHODSPEC];
		g_assert (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF);
		token = (mono_metadata_token_index (token) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;

		if (assembly->save) {
			guint32 *values;

			alloc_table (table, table->rows + 1);
			values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
			values [MONO_METHODSPEC_METHOD] = token;
			values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_sig (assembly, &mono_method_get_generic_container (method)->context);
		}

		token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
		table->next_idx ++;
		/* methodspec and memberref tokens are different */
		g_hash_table_insert (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1), GUINT_TO_POINTER (token));
		return token;
	}
	return token;
}

static guint32
mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method)
{
	guint32 token;
	ReflectionMethodBuilder rmb;
	char *name;

	token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
	if (token)
		return token;

	name = mono_string_to_utf8 (method->name);
	reflection_methodbuilder_from_method_builder (&rmb, method);

	/*
	 * A methodref signature can't contain an unmanaged calling convention.
	 * Since some flags are encoded as part of call_conv, we need to check against it.
*/ if ((rmb.call_conv & ~0x60) != MONO_CALL_DEFAULT && (rmb.call_conv & ~0x60) != MONO_CALL_VARARG) rmb.call_conv = (rmb.call_conv & 0x60) | MONO_CALL_DEFAULT; token = mono_image_get_memberref_token (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type), name, method_builder_encode_signature (assembly, &rmb)); g_free (name); g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token)); return token; } static guint32 mono_image_get_varargs_method_token (MonoDynamicImage *assembly, guint32 original, const gchar *name, guint32 sig) { MonoDynamicTable *table; guint32 token; guint32 *values; table = &assembly->tables [MONO_TABLE_MEMBERREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_MEMBERREF_SIZE; values [MONO_MEMBERREF_CLASS] = original; values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name); values [MONO_MEMBERREF_SIGNATURE] = sig; } token = MONO_TOKEN_MEMBER_REF | table->next_idx; table->next_idx ++; return token; } static guint32 encode_generic_method_definition_sig (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb) { SigBuffer buf; int i; guint32 nparams = mono_array_length (mb->generic_params); guint32 idx; if (!assembly->save) return 0; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0xa); sigbuffer_add_value (&buf, nparams); for (i = 0; i < nparams; i++) { sigbuffer_add_value (&buf, MONO_TYPE_MVAR); sigbuffer_add_value (&buf, i); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 mono_image_get_methodspec_token_for_generic_method_definition (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb) { MonoDynamicTable *table; guint32 *values; guint32 token, mtoken = 0; token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->methodspec, mb)); if (token) return token; table = &assembly->tables [MONO_TABLE_METHODSPEC]; mtoken = mono_image_get_methodref_token_for_methodbuilder (assembly, mb); switch (mono_metadata_token_table (mtoken)) { case MONO_TABLE_MEMBERREF: mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF; break; case MONO_TABLE_METHOD: mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF; break; default: g_assert_not_reached (); } if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_METHODSPEC_SIZE; values [MONO_METHODSPEC_METHOD] = mtoken; values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_definition_sig (assembly, mb); } token = MONO_TOKEN_METHOD_SPEC | table->next_idx; table->next_idx ++; mono_g_hash_table_insert (assembly->methodspec, mb, GUINT_TO_POINTER(token)); return token; } static guint32 mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec) { guint32 token; if (mb->generic_params && create_methodspec) return mono_image_get_methodspec_token_for_generic_method_definition (assembly, mb); token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, mb)); if (token) return token; token = mono_image_get_methodref_token_for_methodbuilder (assembly, mb); g_hash_table_insert (assembly->handleref, mb, GUINT_TO_POINTER(token)); return token; } static guint32 mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *mb) { guint32 token; ReflectionMethodBuilder rmb; char *name; token = GPOINTER_TO_UINT 
(g_hash_table_lookup (assembly->handleref, mb)); if (token) return token; reflection_methodbuilder_from_ctor_builder (&rmb, mb); name = mono_string_to_utf8 (rmb.name); token = mono_image_get_memberref_token (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type), name, method_builder_encode_signature (assembly, &rmb)); g_free (name); g_hash_table_insert (assembly->handleref, mb, GUINT_TO_POINTER(token)); return token; } #endif static gboolean is_field_on_inst (MonoClassField *field) { return (field->parent->generic_class && field->parent->generic_class->is_dynamic && ((MonoDynamicGenericClass*)field->parent->generic_class)->fields); } /* * If FIELD is a field of a MonoDynamicGenericClass, return its non-inflated type. */ static MonoType* get_field_on_inst_generic_type (MonoClassField *field) { MonoDynamicGenericClass *dgclass; int field_index; g_assert (is_field_on_inst (field)); dgclass = (MonoDynamicGenericClass*)field->parent->generic_class; field_index = field - dgclass->fields; g_assert (field_index >= 0 && field_index < dgclass->count_fields); return dgclass->field_generic_types [field_index]; } #ifndef DISABLE_REFLECTION_EMIT static guint32 mono_image_get_fieldref_token (MonoDynamicImage *assembly, MonoReflectionField *f) { MonoType *type; guint32 token; MonoClassField *field; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, f)); if (token) return token; g_assert (f->field->parent); field = f->field; if (field->parent->generic_class && field->parent->generic_class->container_class && field->parent->generic_class->container_class->fields) { int index = field - field->parent->fields; type = field->parent->generic_class->container_class->fields [index].type; } else { if (is_field_on_inst (f->field)) type = get_field_on_inst_generic_type (f->field); else type = f->field->type; } token = mono_image_get_memberref_token (assembly, &f->field->parent->byval_arg, mono_field_get_name (f->field), fieldref_encode_signature (assembly, field->parent->image, type)); g_hash_table_insert (assembly->handleref, f, GUINT_TO_POINTER(token)); return token; } static guint32 mono_image_get_field_on_inst_token (MonoDynamicImage *assembly, MonoReflectionFieldOnTypeBuilderInst *f) { guint32 token; MonoClass *klass; MonoGenericClass *gclass; MonoDynamicGenericClass *dgclass; MonoReflectionFieldBuilder *fb = f->fb; MonoType *type; char *name; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, f)); if (token) return token; type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst); klass = mono_class_from_mono_type (type); gclass = type->data.generic_class; g_assert (gclass->is_dynamic); dgclass = (MonoDynamicGenericClass *) gclass; name = mono_string_to_utf8 (fb->name); token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, field_encode_signature (assembly, fb)); g_free (name); g_hash_table_insert (assembly->handleref, f, GUINT_TO_POINTER (token)); return token; } static guint32 mono_image_get_ctor_on_inst_token (MonoDynamicImage *assembly, MonoReflectionCtorOnTypeBuilderInst *c, gboolean create_methodspec) { guint32 sig, token; MonoClass *klass; MonoGenericClass *gclass; MonoDynamicGenericClass *dgclass; MonoReflectionCtorBuilder *cb = c->cb; ReflectionMethodBuilder rmb; MonoType *type; char *name; /* A ctor cannot be a generic method, so we can ignore create_methodspec */ token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, c)); if (token) return token; type = mono_reflection_type_get_handle 
((MonoReflectionType*)c->inst); klass = mono_class_from_mono_type (type); gclass = type->data.generic_class; g_assert (gclass->is_dynamic); dgclass = (MonoDynamicGenericClass *) gclass; reflection_methodbuilder_from_ctor_builder (&rmb, cb); name = mono_string_to_utf8 (rmb.name); sig = method_builder_encode_signature (assembly, &rmb); token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig); g_free (name); g_hash_table_insert (assembly->handleref, c, GUINT_TO_POINTER (token)); return token; } static MonoMethod* mono_reflection_method_on_tb_inst_get_handle (MonoReflectionMethodOnTypeBuilderInst *m) { MonoClass *klass; MonoGenericContext tmp_context; MonoType **type_argv; MonoGenericInst *ginst; MonoMethod *method, *inflated; int count, i; method = inflate_method (m->inst, (MonoObject*)m->mb); klass = method->klass; if (m->method_args == NULL) return method; if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; count = mono_array_length (m->method_args); type_argv = g_new0 (MonoType *, count); for (i = 0; i < count; i++) { MonoReflectionType *garg = mono_array_get (m->method_args, gpointer, i); type_argv [i] = mono_reflection_type_get_handle (garg); } ginst = mono_metadata_get_generic_inst (count, type_argv); g_free (type_argv); tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL; tmp_context.method_inst = ginst; inflated = mono_class_inflate_generic_method (method, &tmp_context); return inflated; } static guint32 mono_image_get_method_on_inst_token (MonoDynamicImage *assembly, MonoReflectionMethodOnTypeBuilderInst *m, gboolean create_methodspec) { guint32 sig, token; MonoClass *klass; MonoGenericClass *gclass; MonoReflectionMethodBuilder *mb = m->mb; ReflectionMethodBuilder rmb; MonoType *type; char *name; if (m->method_args) { MonoMethod *inflated; inflated = mono_reflection_method_on_tb_inst_get_handle (m); if (create_methodspec) token = mono_image_get_methodspec_token (assembly, inflated); else token = mono_image_get_inflated_method_token (assembly, inflated); return token; } token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, m)); if (token) return token; type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst); klass = mono_class_from_mono_type (type); gclass = type->data.generic_class; g_assert (gclass->is_dynamic); reflection_methodbuilder_from_method_builder (&rmb, mb); name = mono_string_to_utf8 (rmb.name); sig = method_builder_encode_signature (assembly, &rmb); token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig); g_free (name); g_hash_table_insert (assembly->handleref, m, GUINT_TO_POINTER (token)); return token; } static guint32 encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context) { SigBuffer buf; int i; guint32 nparams = context->method_inst->type_argc; guint32 idx; if (!assembly->save) return 0; sigbuffer_init (&buf, 32); /* * FIXME: vararg, explicit_this, differenc call_conv values... 
*/ sigbuffer_add_value (&buf, 0xa); /* FIXME FIXME FIXME */ sigbuffer_add_value (&buf, nparams); for (i = 0; i < nparams; i++) encode_type (assembly, context->method_inst->type_argv [i], &buf); idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 method_encode_methodspec (MonoDynamicImage *assembly, MonoMethod *method) { MonoDynamicTable *table; guint32 *values; guint32 token, mtoken = 0, sig; MonoMethodInflated *imethod; MonoMethod *declaring; table = &assembly->tables [MONO_TABLE_METHODSPEC]; g_assert (method->is_inflated); imethod = (MonoMethodInflated *) method; declaring = imethod->declaring; sig = method_encode_signature (assembly, mono_method_signature (declaring)); mtoken = mono_image_get_memberref_token (assembly, &method->klass->byval_arg, declaring->name, sig); if (!mono_method_signature (declaring)->generic_param_count) return mtoken; switch (mono_metadata_token_table (mtoken)) { case MONO_TABLE_MEMBERREF: mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF; break; case MONO_TABLE_METHOD: mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF; break; default: g_assert_not_reached (); } sig = encode_generic_method_sig (assembly, mono_method_get_context (method)); if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_METHODSPEC_SIZE; values [MONO_METHODSPEC_METHOD] = mtoken; values [MONO_METHODSPEC_SIGNATURE] = sig; } token = MONO_TOKEN_METHOD_SPEC | table->next_idx; table->next_idx ++; return token; } static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method) { MonoMethodInflated *imethod; guint32 token; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method)); if (token) return token; g_assert (method->is_inflated); imethod = (MonoMethodInflated *) method; if (mono_method_signature (imethod->declaring)->generic_param_count) { token = method_encode_methodspec (assembly, method); } else { guint32 sig = method_encode_signature ( assembly, mono_method_signature (imethod->declaring)); token = mono_image_get_memberref_token ( assembly, &method->klass->byval_arg, method->name, sig); } g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token)); return token; } static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m) { MonoMethodInflated *imethod = (MonoMethodInflated *) m; guint32 sig, token; sig = method_encode_signature (assembly, mono_method_signature (imethod->declaring)); token = mono_image_get_memberref_token ( assembly, &m->klass->byval_arg, m->name, sig); return token; } static guint32 create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb) { MonoDynamicTable *table; MonoClass *klass; MonoType *type; guint32 *values; guint32 token; SigBuffer buf; int count, i; /* * We're creating a TypeSpec for the TypeBuilder of a generic type declaration, * ie. what we'd normally use as the generic type in a TypeSpec signature. * Because of this, we must not insert it into the `typeref' hash table. 
*/ type = mono_reflection_type_get_handle ((MonoReflectionType*)tb); token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type)); if (token) return token; sigbuffer_init (&buf, 32); g_assert (tb->generic_params); klass = mono_class_from_mono_type (type); if (tb->generic_container) mono_reflection_create_generic_class (tb); sigbuffer_add_value (&buf, MONO_TYPE_GENERICINST); g_assert (klass->generic_container); sigbuffer_add_value (&buf, klass->byval_arg.type); sigbuffer_add_value (&buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE)); count = mono_array_length (tb->generic_params); sigbuffer_add_value (&buf, count); for (i = 0; i < count; i++) { MonoReflectionGenericParam *gparam; gparam = mono_array_get (tb->generic_params, MonoReflectionGenericParam *, i); encode_type (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)gparam), &buf); } table = &assembly->tables [MONO_TABLE_TYPESPEC]; if (assembly->save) { token = sigbuffer_add_to_blob_cached (assembly, &buf); alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPESPEC_SIZE; values [MONO_TYPESPEC_SIGNATURE] = token; } sigbuffer_free (&buf); token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS); g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token)); table->next_idx ++; return token; } /* * Return a copy of TYPE, adding the custom modifiers in MODREQ and MODOPT. */ static MonoType* add_custom_modifiers (MonoDynamicImage *assembly, MonoType *type, MonoArray *modreq, MonoArray *modopt) { int i, count, len, pos; MonoType *t; count = 0; if (modreq) count += mono_array_length (modreq); if (modopt) count += mono_array_length (modopt); if (count == 0) return mono_metadata_type_dup (NULL, type); len = MONO_SIZEOF_TYPE + ((gint32)count) * sizeof (MonoCustomMod); t = g_malloc (len); memcpy (t, type, MONO_SIZEOF_TYPE); t->num_mods = count; pos = 0; if (modreq) { for (i = 0; i < mono_array_length (modreq); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modreq, i); t->modifiers [pos].required = 1; t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod); pos ++; } } if (modopt) { for (i = 0; i < mono_array_length (modopt); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modopt, i); t->modifiers [pos].required = 0; t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod); pos ++; } } return t; } static guint32 mono_image_get_generic_field_token (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb) { MonoDynamicTable *table; MonoClass *klass; MonoType *custom = NULL; guint32 *values; guint32 token, pclass, parent, sig; gchar *name; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, fb)); if (token) return token; klass = mono_class_from_mono_type (mono_reflection_type_get_handle (fb->typeb)); name = mono_string_to_utf8 (fb->name); /* fb->type does not include the custom modifiers */ /* FIXME: We should do this in one place when a fieldbuilder is created */ if (fb->modreq || fb->modopt) { custom = add_custom_modifiers (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type), fb->modreq, fb->modopt); sig = fieldref_encode_signature (assembly, NULL, custom); g_free (custom); } else { sig = fieldref_encode_signature (assembly, NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type)); } parent = create_generic_typespec (assembly, (MonoReflectionTypeBuilder *) fb->typeb); g_assert ((parent & MONO_TYPEDEFORREF_MASK) == 
MONO_TYPEDEFORREF_TYPESPEC); pclass = MONO_MEMBERREF_PARENT_TYPESPEC; parent >>= MONO_TYPEDEFORREF_BITS; table = &assembly->tables [MONO_TABLE_MEMBERREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_MEMBERREF_SIZE; values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS); values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name); values [MONO_MEMBERREF_SIGNATURE] = sig; } token = MONO_TOKEN_MEMBER_REF | table->next_idx; table->next_idx ++; g_hash_table_insert (assembly->handleref, fb, GUINT_TO_POINTER(token)); g_free (name); return token; } static guint32 mono_reflection_encode_sighelper (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper) { SigBuffer buf; guint32 nargs; guint32 size; guint32 i, idx; if (!assembly->save) return 0; /* FIXME: this means SignatureHelper.SignatureHelpType.HELPER_METHOD */ g_assert (helper->type == 2); if (helper->arguments) nargs = mono_array_length (helper->arguments); else nargs = 0; size = 10 + (nargs * 10); sigbuffer_init (&buf, 32); /* Encode calling convention */ /* Change Any to Standard */ if ((helper->call_conv & 0x03) == 0x03) helper->call_conv = 0x01; /* explicit_this implies has_this */ if (helper->call_conv & 0x40) helper->call_conv &= 0x20; if (helper->call_conv == 0) { /* Unmanaged */ idx = helper->unmanaged_call_conv - 1; } else { /* Managed */ idx = helper->call_conv & 0x60; /* has_this + explicit_this */ if (helper->call_conv & 0x02) /* varargs */ idx += 0x05; } sigbuffer_add_byte (&buf, idx); sigbuffer_add_value (&buf, nargs); encode_reflection_type (assembly, helper->return_type, &buf); for (i = 0; i < nargs; ++i) { MonoArray *modreqs = NULL; MonoArray *modopts = NULL; MonoReflectionType *pt; if (helper->modreqs && (i < mono_array_length (helper->modreqs))) modreqs = mono_array_get (helper->modreqs, MonoArray*, i); if (helper->modopts && (i < mono_array_length (helper->modopts))) modopts = mono_array_get (helper->modopts, MonoArray*, i); encode_custom_modifiers (assembly, modreqs, modopts, &buf); pt = mono_array_get (helper->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper) { guint32 idx; MonoDynamicTable *table; guint32 *values; table = &assembly->tables [MONO_TABLE_STANDALONESIG]; idx = table->next_idx ++; table->rows ++; alloc_table (table, table->rows); values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE; values [MONO_STAND_ALONE_SIGNATURE] = mono_reflection_encode_sighelper (assembly, helper); return idx; } static int reflection_cc_to_file (int call_conv) { switch (call_conv & 0x3) { case 0: case 1: return MONO_CALL_DEFAULT; case 2: return MONO_CALL_VARARG; default: g_assert_not_reached (); } return 0; } #endif /* !DISABLE_REFLECTION_EMIT */ typedef struct { MonoType *parent; MonoMethodSignature *sig; char *name; guint32 token; } ArrayMethod; #ifndef DISABLE_REFLECTION_EMIT static guint32 mono_image_get_array_token (MonoDynamicImage *assembly, MonoReflectionArrayMethod *m) { guint32 nparams, i; GList *tmp; char *name; MonoMethodSignature *sig; ArrayMethod *am; MonoType *mtype; name = mono_string_to_utf8 (m->name); nparams = mono_array_length (m->parameters); sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * nparams); sig->hasthis = 1; sig->sentinelpos = -1; 
sig->call_convention = reflection_cc_to_file (m->call_conv); sig->param_count = nparams; sig->ret = m->ret ? mono_reflection_type_get_handle (m->ret): &mono_defaults.void_class->byval_arg; mtype = mono_reflection_type_get_handle (m->parent); for (i = 0; i < nparams; ++i) sig->params [i] = mono_type_array_get_and_resolve (m->parameters, i); for (tmp = assembly->array_methods; tmp; tmp = tmp->next) { am = tmp->data; if (strcmp (name, am->name) == 0 && mono_metadata_type_equal (am->parent, mtype) && mono_metadata_signature_equal (am->sig, sig)) { g_free (name); g_free (sig); m->table_idx = am->token & 0xffffff; return am->token; } } am = g_new0 (ArrayMethod, 1); am->name = name; am->sig = sig; am->parent = mtype; am->token = mono_image_get_memberref_token (assembly, am->parent, name, method_encode_signature (assembly, sig)); assembly->array_methods = g_list_prepend (assembly->array_methods, am); m->table_idx = am->token & 0xffffff; return am->token; } /* * Insert into the metadata tables all the info about the TypeBuilder tb. * Data in the tables is inserted in a predefined order, since some tables need to be sorted. */ static void mono_image_get_type_info (MonoDomain *domain, MonoReflectionTypeBuilder *tb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint *values; int i, is_object = 0, is_system = 0; char *n; table = &assembly->tables [MONO_TABLE_TYPEDEF]; values = table->values + tb->table_idx * MONO_TYPEDEF_SIZE; values [MONO_TYPEDEF_FLAGS] = tb->attrs; n = mono_string_to_utf8 (tb->name); if (strcmp (n, "Object") == 0) is_object++; values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, n); g_free (n); n = mono_string_to_utf8 (tb->nspace); if (strcmp (n, "System") == 0) is_system++; values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, n); g_free (n); if (tb->parent && !(is_system && is_object) && !(tb->attrs & TYPE_ATTRIBUTE_INTERFACE)) { /* interfaces don't have a parent */ values [MONO_TYPEDEF_EXTENDS] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)); } else { values [MONO_TYPEDEF_EXTENDS] = 0; } values [MONO_TYPEDEF_FIELD_LIST] = assembly->tables [MONO_TABLE_FIELD].next_idx; values [MONO_TYPEDEF_METHOD_LIST] = assembly->tables [MONO_TABLE_METHOD].next_idx; /* * if we have explicitlayout or sequentiallayouts, output data in the * ClassLayout table. 
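 *
 * For reference (ECMA-335 II.22.8) a ClassLayout row only records
 * { Parent, ClassSize, PackingSize }; e.g. a type built with explicit
 * layout, Size = 16 and Pack = 4 produces, in the values array below
 * (numbers illustrative):
 *
 *   MONO_CLASS_LAYOUT_PARENT       = tb->table_idx
 *   MONO_CLASS_LAYOUT_CLASS_SIZE   = 16
 *   MONO_CLASS_LAYOUT_PACKING_SIZE = 4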
*/ if (((tb->attrs & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT) && ((tb->class_size > 0) || (tb->packing_size > 0))) { table = &assembly->tables [MONO_TABLE_CLASSLAYOUT]; table->rows++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_CLASS_LAYOUT_SIZE; values [MONO_CLASS_LAYOUT_PARENT] = tb->table_idx; values [MONO_CLASS_LAYOUT_CLASS_SIZE] = tb->class_size; values [MONO_CLASS_LAYOUT_PACKING_SIZE] = tb->packing_size; } /* handle interfaces */ if (tb->interfaces) { table = &assembly->tables [MONO_TABLE_INTERFACEIMPL]; i = table->rows; table->rows += mono_array_length (tb->interfaces); alloc_table (table, table->rows); values = table->values + (i + 1) * MONO_INTERFACEIMPL_SIZE; for (i = 0; i < mono_array_length (tb->interfaces); ++i) { MonoReflectionType* iface = (MonoReflectionType*) mono_array_get (tb->interfaces, gpointer, i); values [MONO_INTERFACEIMPL_CLASS] = tb->table_idx; values [MONO_INTERFACEIMPL_INTERFACE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (iface)); values += MONO_INTERFACEIMPL_SIZE; } } /* handle fields */ if (tb->fields) { table = &assembly->tables [MONO_TABLE_FIELD]; table->rows += tb->num_fields; alloc_table (table, table->rows); for (i = 0; i < tb->num_fields; ++i) mono_image_get_field_info ( mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i), assembly); } /* handle constructors */ if (tb->ctors) { table = &assembly->tables [MONO_TABLE_METHOD]; table->rows += mono_array_length (tb->ctors); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (tb->ctors); ++i) mono_image_get_ctor_info (domain, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i), assembly); } /* handle methods */ if (tb->methods) { table = &assembly->tables [MONO_TABLE_METHOD]; table->rows += tb->num_methods; alloc_table (table, table->rows); for (i = 0; i < tb->num_methods; ++i) mono_image_get_method_info ( mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i), assembly); } /* Do the same with properties etc.. 
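 * The loops below add one EventMap row per type that declares events, e.g.
 * (values illustrative):
 *
 *   MONO_EVENT_MAP_PARENT    = tb->table_idx
 *   MONO_EVENT_MAP_EVENTLIST = first Event row belonging to this type
 *
 * and the equivalent PropertyMap row for properties, so the *List columns
 * stay contiguous when the builders are emitted in table order.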
*/ if (tb->events && mono_array_length (tb->events)) { table = &assembly->tables [MONO_TABLE_EVENT]; table->rows += mono_array_length (tb->events); alloc_table (table, table->rows); table = &assembly->tables [MONO_TABLE_EVENTMAP]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_EVENT_MAP_SIZE; values [MONO_EVENT_MAP_PARENT] = tb->table_idx; values [MONO_EVENT_MAP_EVENTLIST] = assembly->tables [MONO_TABLE_EVENT].next_idx; for (i = 0; i < mono_array_length (tb->events); ++i) mono_image_get_event_info ( mono_array_get (tb->events, MonoReflectionEventBuilder*, i), assembly); } if (tb->properties && mono_array_length (tb->properties)) { table = &assembly->tables [MONO_TABLE_PROPERTY]; table->rows += mono_array_length (tb->properties); alloc_table (table, table->rows); table = &assembly->tables [MONO_TABLE_PROPERTYMAP]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_PROPERTY_MAP_SIZE; values [MONO_PROPERTY_MAP_PARENT] = tb->table_idx; values [MONO_PROPERTY_MAP_PROPERTY_LIST] = assembly->tables [MONO_TABLE_PROPERTY].next_idx; for (i = 0; i < mono_array_length (tb->properties); ++i) mono_image_get_property_info ( mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i), assembly); } /* handle generic parameters */ if (tb->generic_params) { table = &assembly->tables [MONO_TABLE_GENERICPARAM]; table->rows += mono_array_length (tb->generic_params); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (tb->generic_params); ++i) { guint32 owner = MONO_TYPEORMETHOD_TYPE | (tb->table_idx << MONO_TYPEORMETHOD_BITS); mono_image_get_generic_param_info ( mono_array_get (tb->generic_params, MonoReflectionGenericParam*, i), owner, assembly); } } mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx), tb->permissions); if (tb->subtypes) { MonoDynamicTable *ntable; ntable = &assembly->tables [MONO_TABLE_NESTEDCLASS]; ntable->rows += mono_array_length (tb->subtypes); alloc_table (ntable, ntable->rows); values = ntable->values + ntable->next_idx * MONO_NESTED_CLASS_SIZE; for (i = 0; i < mono_array_length (tb->subtypes); ++i) { MonoReflectionTypeBuilder *subtype = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i); values [MONO_NESTED_CLASS_NESTED] = subtype->table_idx; values [MONO_NESTED_CLASS_ENCLOSING] = tb->table_idx; /*g_print ("nesting %s (%d) in %s (%d) (rows %d/%d)\n", mono_string_to_utf8 (subtype->name), subtype->table_idx, mono_string_to_utf8 (tb->name), tb->table_idx, ntable->next_idx, ntable->rows);*/ values += MONO_NESTED_CLASS_SIZE; ntable->next_idx++; } } } #endif static void collect_types (GPtrArray *types, MonoReflectionTypeBuilder *type) { int i; g_ptr_array_add (types, type); /* FIXME: GC object added to unmanaged memory */ if (!type->subtypes) return; for (i = 0; i < mono_array_length (type->subtypes); ++i) { MonoReflectionTypeBuilder *subtype = mono_array_get (type->subtypes, MonoReflectionTypeBuilder*, i); collect_types (types, subtype); } } static gint compare_types_by_table_idx (MonoReflectionTypeBuilder **type1, MonoReflectionTypeBuilder **type2) { if ((*type1)->table_idx < (*type2)->table_idx) return -1; else if ((*type1)->table_idx > (*type2)->table_idx) return 1; else return 0; } static void params_add_cattrs (MonoDynamicImage *assembly, MonoArray *pinfo) { int i; if (!pinfo) return; for (i = 0; i < mono_array_length (pinfo); ++i) { MonoReflectionParamBuilder *pb; pb = mono_array_get (pinfo, 
MonoReflectionParamBuilder *, i); if (!pb) continue; mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PARAMDEF, pb->cattrs); } } static void type_add_cattrs (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb) { int i; mono_image_add_cattrs (assembly, tb->table_idx, MONO_CUSTOM_ATTR_TYPEDEF, tb->cattrs); if (tb->fields) { for (i = 0; i < tb->num_fields; ++i) { MonoReflectionFieldBuilder* fb; fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i); mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs); } } if (tb->events) { for (i = 0; i < mono_array_length (tb->events); ++i) { MonoReflectionEventBuilder* eb; eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i); mono_image_add_cattrs (assembly, eb->table_idx, MONO_CUSTOM_ATTR_EVENT, eb->cattrs); } } if (tb->properties) { for (i = 0; i < mono_array_length (tb->properties); ++i) { MonoReflectionPropertyBuilder* pb; pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i); mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PROPERTY, pb->cattrs); } } if (tb->ctors) { for (i = 0; i < mono_array_length (tb->ctors); ++i) { MonoReflectionCtorBuilder* cb; cb = mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i); mono_image_add_cattrs (assembly, cb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, cb->cattrs); params_add_cattrs (assembly, cb->pinfo); } } if (tb->methods) { for (i = 0; i < tb->num_methods; ++i) { MonoReflectionMethodBuilder* mb; mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i); mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs); params_add_cattrs (assembly, mb->pinfo); } } if (tb->subtypes) { for (i = 0; i < mono_array_length (tb->subtypes); ++i) type_add_cattrs (assembly, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i)); } } static void module_add_cattrs (MonoDynamicImage *assembly, MonoReflectionModuleBuilder *moduleb) { int i; mono_image_add_cattrs (assembly, moduleb->table_idx, MONO_CUSTOM_ATTR_MODULE, moduleb->cattrs); if (moduleb->global_methods) { for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) { MonoReflectionMethodBuilder* mb = mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i); mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs); params_add_cattrs (assembly, mb->pinfo); } } if (moduleb->global_fields) { for (i = 0; i < mono_array_length (moduleb->global_fields); ++i) { MonoReflectionFieldBuilder *fb = mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i); mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs); } } if (moduleb->types) { for (i = 0; i < moduleb->num_types; ++i) type_add_cattrs (assembly, mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i)); } } static void mono_image_fill_file_table (MonoDomain *domain, MonoReflectionModule *module, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; char blob_size [6]; guchar hash [20]; char *b = blob_size; char *dir, *path; table = &assembly->tables [MONO_TABLE_FILE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_FILE_SIZE; values [MONO_FILE_FLAGS] = FILE_CONTAINS_METADATA; values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, module->image->module_name); if (module->image->dynamic) { /* This depends on the fact that the main module is emitted last */ dir = mono_string_to_utf8 
(((MonoReflectionModuleBuilder*)module)->assemblyb->dir); path = g_strdup_printf ("%s%c%s", dir, G_DIR_SEPARATOR, module->image->module_name); } else { dir = NULL; path = g_strdup (module->image->name); } mono_sha1_get_digest_from_file (path, hash); g_free (dir); g_free (path); mono_metadata_encode_value (20, b, &b); values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size); mono_image_add_stream_data (&assembly->blob, (char*)hash, 20); table->next_idx ++; } static void mono_image_fill_module_table (MonoDomain *domain, MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly) { MonoDynamicTable *table; int i; table = &assembly->tables [MONO_TABLE_MODULE]; mb->table_idx = table->next_idx ++; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->module.name); i = mono_image_add_stream_data (&assembly->guid, mono_array_addr (mb->guid, char, 0), 16); i /= 16; ++i; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_GENERATION] = 0; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_MVID] = i; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENC] = 0; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENCBASE] = 0; } static guint32 mono_image_fill_export_table_from_class (MonoDomain *domain, MonoClass *klass, guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint32 visib, res; visib = klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (! ((visib & TYPE_ATTRIBUTE_PUBLIC) || (visib & TYPE_ATTRIBUTE_NESTED_PUBLIC))) return 0; table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE; values [MONO_EXP_TYPE_FLAGS] = klass->flags; values [MONO_EXP_TYPE_TYPEDEF] = klass->type_token; if (klass->nested_in) values [MONO_EXP_TYPE_IMPLEMENTATION] = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE; else values [MONO_EXP_TYPE_IMPLEMENTATION] = (module_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_FILE; values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); res = table->next_idx; table->next_idx ++; /* Emit nested types */ if (klass->ext && klass->ext->nested_classes) { GList *tmp; for (tmp = klass->ext->nested_classes; tmp; tmp = tmp->next) mono_image_fill_export_table_from_class (domain, tmp->data, module_index, table->next_idx - 1, assembly); } return res; } static void mono_image_fill_export_table (MonoDomain *domain, MonoReflectionTypeBuilder *tb, guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly) { MonoClass *klass; guint32 idx, i; klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb)); klass->type_token = mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx); idx = mono_image_fill_export_table_from_class (domain, klass, module_index, parent_index, assembly); /* * Emit nested types * We need to do this ourselves since klass->nested_classes is not set up. 
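 *
 * The recursion below chains ExportedType rows through their Implementation
 * column, roughly like this (row references illustrative):
 *
 *   Outer           Implementation = FILE row of the module  (module_index)
 *   Outer.Inner     Implementation = EXPORTEDTYPE row of Outer (parent_index)
 *   Outer.Inner.X   Implementation = EXPORTEDTYPE row of Inner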
*/ if (tb->subtypes) { for (i = 0; i < mono_array_length (tb->subtypes); ++i) mono_image_fill_export_table (domain, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i), module_index, idx, assembly); } } static void mono_image_fill_export_table_from_module (MonoDomain *domain, MonoReflectionModule *module, guint32 module_index, MonoDynamicImage *assembly) { MonoImage *image = module->image; MonoTableInfo *t; guint32 i; t = &image->tables [MONO_TABLE_TYPEDEF]; for (i = 0; i < t->rows; ++i) { MonoClass *klass = mono_class_get (image, mono_metadata_make_token (MONO_TABLE_TYPEDEF, i + 1)); if (klass->flags & TYPE_ATTRIBUTE_PUBLIC) mono_image_fill_export_table_from_class (domain, klass, module_index, 0, assembly); } } static guint32 add_exported_type (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly, MonoClass *klass) { MonoDynamicTable *table; guint32 *values; guint32 scope, idx, res, impl; gboolean forwarder = TRUE; if (klass->nested_in) { impl = add_exported_type (assemblyb, assembly, klass->nested_in); forwarder = FALSE; } else { scope = resolution_scope_from_image (assembly, klass->image); g_assert ((scope & MONO_RESOLTION_SCOPE_MASK) == MONO_RESOLTION_SCOPE_ASSEMBLYREF); idx = scope >> MONO_RESOLTION_SCOPE_BITS; impl = (idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_ASSEMBLYREF; } table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE; values [MONO_EXP_TYPE_FLAGS] = forwarder ? TYPE_ATTRIBUTE_FORWARDER : 0; values [MONO_EXP_TYPE_TYPEDEF] = 0; values [MONO_EXP_TYPE_IMPLEMENTATION] = impl; values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); res = (table->next_idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE; table->next_idx++; return res; } static void mono_image_fill_export_table_from_type_forwarders (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly) { MonoClass *klass; int i; if (!assemblyb->type_forwarders) return; for (i = 0; i < mono_array_length (assemblyb->type_forwarders); ++i) { MonoReflectionType *t = mono_array_get (assemblyb->type_forwarders, MonoReflectionType *, i); MonoType *type; if (!t) continue; type = mono_reflection_type_get_handle (t); g_assert (type); klass = mono_class_from_mono_type (type); add_exported_type (assemblyb, assembly, klass); } } #define align_pointer(base,p)\ do {\ guint32 __diff = (unsigned char*)(p)-(unsigned char*)(base);\ if (__diff & 3)\ (p) += 4 - (__diff & 3);\ } while (0) static int compare_constants (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_CONSTANT_PARENT] - b_values [MONO_CONSTANT_PARENT]; } static int compare_semantics (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; int assoc = a_values [MONO_METHOD_SEMA_ASSOCIATION] - b_values [MONO_METHOD_SEMA_ASSOCIATION]; if (assoc) return assoc; return a_values [MONO_METHOD_SEMA_SEMANTICS] - b_values [MONO_METHOD_SEMA_SEMANTICS]; } static int compare_custom_attrs (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_CUSTOM_ATTR_PARENT] - b_values [MONO_CUSTOM_ATTR_PARENT]; } static int compare_field_marshal (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values 
	[MONO_FIELD_MARSHAL_PARENT] - b_values [MONO_FIELD_MARSHAL_PARENT];
}

static int
compare_nested (const void *a, const void *b)
{
	const guint32 *a_values = a;
	const guint32 *b_values = b;

	return a_values [MONO_NESTED_CLASS_NESTED] - b_values [MONO_NESTED_CLASS_NESTED];
}

static int
compare_genericparam (const void *a, const void *b)
{
	const GenericParamTableEntry **a_entry = (const GenericParamTableEntry **) a;
	const GenericParamTableEntry **b_entry = (const GenericParamTableEntry **) b;

	if ((*b_entry)->owner == (*a_entry)->owner)
		return mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*a_entry)->gparam)) -
			mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*b_entry)->gparam));
	else
		return (*a_entry)->owner - (*b_entry)->owner;
}

static int
compare_declsecurity_attrs (const void *a, const void *b)
{
	const guint32 *a_values = a;
	const guint32 *b_values = b;

	return a_values [MONO_DECL_SECURITY_PARENT] - b_values [MONO_DECL_SECURITY_PARENT];
}

static int
compare_interface_impl (const void *a, const void *b)
{
	const guint32 *a_values = a;
	const guint32 *b_values = b;
	int klass = a_values [MONO_INTERFACEIMPL_CLASS] - b_values [MONO_INTERFACEIMPL_CLASS];

	if (klass)
		return klass;

	return a_values [MONO_INTERFACEIMPL_INTERFACE] - b_values [MONO_INTERFACEIMPL_INTERFACE];
}

static void
pad_heap (MonoDynamicStream *sh)
{
	if (sh->index & 3) {
		int sz = 4 - (sh->index & 3);
		memset (sh->data + sh->index, 0, sz);
		sh->index += sz;
	}
}

struct StreamDesc {
	const char *name;
	MonoDynamicStream *stream;
};

/*
 * build_compressed_metadata() fills in the blob of data that represents the
 * raw metadata as it will be saved in the PE file. The five streams are output
 * and the metadata tables are compressed from the guint32 array representation
 * to the on-disk format.
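 *
 * As a rough illustration (not a literal dump): each column that is kept as a
 * full guint32 in assembly->tables [i].values is written to disk as either
 * 2 or 4 bytes, depending on what mono_metadata_compute_size () decided for
 * that column:
 *
 *   in memory : guint32 values [col]            always 4 bytes per column
 *   on disk   : 2 bytes if the referenced table/heap index fits in 16 bits,
 *               4 bytes otherwise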
*/ static void build_compressed_metadata (MonoDynamicImage *assembly) { MonoDynamicTable *table; int i; guint64 valid_mask = 0; guint64 sorted_mask; guint32 heapt_size = 0; guint32 meta_size = 256; /* allow for header and other stuff */ guint32 table_offset; guint32 ntables = 0; guint64 *int64val; guint32 *int32val; guint16 *int16val; MonoImage *meta; unsigned char *p; struct StreamDesc stream_desc [5]; qsort (assembly->gen_params->pdata, assembly->gen_params->len, sizeof (gpointer), compare_genericparam); for (i = 0; i < assembly->gen_params->len; i++){ GenericParamTableEntry *entry = g_ptr_array_index (assembly->gen_params, i); write_generic_param_entry (assembly, entry); } stream_desc [0].name = "#~"; stream_desc [0].stream = &assembly->tstream; stream_desc [1].name = "#Strings"; stream_desc [1].stream = &assembly->sheap; stream_desc [2].name = "#US"; stream_desc [2].stream = &assembly->us; stream_desc [3].name = "#Blob"; stream_desc [3].stream = &assembly->blob; stream_desc [4].name = "#GUID"; stream_desc [4].stream = &assembly->guid; /* tables that are sorted */ sorted_mask = ((guint64)1 << MONO_TABLE_CONSTANT) | ((guint64)1 << MONO_TABLE_FIELDMARSHAL) | ((guint64)1 << MONO_TABLE_METHODSEMANTICS) | ((guint64)1 << MONO_TABLE_CLASSLAYOUT) | ((guint64)1 << MONO_TABLE_FIELDLAYOUT) | ((guint64)1 << MONO_TABLE_FIELDRVA) | ((guint64)1 << MONO_TABLE_IMPLMAP) | ((guint64)1 << MONO_TABLE_NESTEDCLASS) | ((guint64)1 << MONO_TABLE_METHODIMPL) | ((guint64)1 << MONO_TABLE_CUSTOMATTRIBUTE) | ((guint64)1 << MONO_TABLE_DECLSECURITY) | ((guint64)1 << MONO_TABLE_GENERICPARAM) | ((guint64)1 << MONO_TABLE_INTERFACEIMPL); /* Compute table sizes */ /* the MonoImage has already been created in mono_image_basic_init() */ meta = &assembly->image; /* sizes should be multiple of 4 */ pad_heap (&assembly->blob); pad_heap (&assembly->guid); pad_heap (&assembly->sheap); pad_heap (&assembly->us); /* Setup the info used by compute_sizes () */ meta->idx_blob_wide = assembly->blob.index >= 65536 ? 1 : 0; meta->idx_guid_wide = assembly->guid.index >= 65536 ? 1 : 0; meta->idx_string_wide = assembly->sheap.index >= 65536 ? 
1 : 0; meta_size += assembly->blob.index; meta_size += assembly->guid.index; meta_size += assembly->sheap.index; meta_size += assembly->us.index; for (i=0; i < MONO_TABLE_NUM; ++i) meta->tables [i].rows = assembly->tables [i].rows; for (i = 0; i < MONO_TABLE_NUM; i++){ if (meta->tables [i].rows == 0) continue; valid_mask |= (guint64)1 << i; ntables ++; meta->tables [i].row_size = mono_metadata_compute_size ( meta, i, &meta->tables [i].size_bitfield); heapt_size += meta->tables [i].row_size * meta->tables [i].rows; } heapt_size += 24; /* #~ header size */ heapt_size += ntables * 4; /* make multiple of 4 */ heapt_size += 3; heapt_size &= ~3; meta_size += heapt_size; meta->raw_metadata = g_malloc0 (meta_size); p = (unsigned char*)meta->raw_metadata; /* the metadata signature */ *p++ = 'B'; *p++ = 'S'; *p++ = 'J'; *p++ = 'B'; /* version numbers and 4 bytes reserved */ int16val = (guint16*)p; *int16val++ = GUINT16_TO_LE (meta->md_version_major); *int16val = GUINT16_TO_LE (meta->md_version_minor); p += 8; /* version string */ int32val = (guint32*)p; *int32val = GUINT32_TO_LE ((strlen (meta->version) + 3) & (~3)); /* needs to be multiple of 4 */ p += 4; memcpy (p, meta->version, strlen (meta->version)); p += GUINT32_FROM_LE (*int32val); align_pointer (meta->raw_metadata, p); int16val = (guint16*)p; *int16val++ = GUINT16_TO_LE (0); /* flags must be 0 */ *int16val = GUINT16_TO_LE (5); /* number of streams */ p += 4; /* * write the stream info. */ table_offset = (p - (unsigned char*)meta->raw_metadata) + 5 * 8 + 40; /* room needed for stream headers */ table_offset += 3; table_offset &= ~3; assembly->tstream.index = heapt_size; for (i = 0; i < 5; ++i) { int32val = (guint32*)p; stream_desc [i].stream->offset = table_offset; *int32val++ = GUINT32_TO_LE (table_offset); *int32val = GUINT32_TO_LE (stream_desc [i].stream->index); table_offset += GUINT32_FROM_LE (*int32val); table_offset += 3; table_offset &= ~3; p += 8; strcpy ((char*)p, stream_desc [i].name); p += strlen (stream_desc [i].name) + 1; align_pointer (meta->raw_metadata, p); } /* * now copy the data, the table stream header and contents goes first. 
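 *
 * The #~ stream header written below is, roughly:
 *
 *   u32  reserved (0)
 *   u8   major version (2 on the 2.0+ profile, 1 otherwise)
 *   u8   minor version (0)
 *   u8   heap-size flags: 0x01 wide #Strings, 0x02 wide #GUID, 0x04 wide #Blob
 *   u8   reserved (1)
 *   u64  valid-table bitmask
 *   u64  sorted-table bitmask
 *   u32  row count for each table present in the valid mask
 *   ...  followed by the compressed rows themselves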
*/ g_assert ((p - (unsigned char*)meta->raw_metadata) < assembly->tstream.offset); p = (guchar*)meta->raw_metadata + assembly->tstream.offset; int32val = (guint32*)p; *int32val = GUINT32_TO_LE (0); /* reserved */ p += 4; if (mono_framework_version () > 1) { *p++ = 2; /* version */ *p++ = 0; } else { *p++ = 1; /* version */ *p++ = 0; } if (meta->idx_string_wide) *p |= 0x01; if (meta->idx_guid_wide) *p |= 0x02; if (meta->idx_blob_wide) *p |= 0x04; ++p; *p++ = 1; /* reserved */ int64val = (guint64*)p; *int64val++ = GUINT64_TO_LE (valid_mask); *int64val++ = GUINT64_TO_LE (valid_mask & sorted_mask); /* bitvector of sorted tables */ p += 16; int32val = (guint32*)p; for (i = 0; i < MONO_TABLE_NUM; i++){ if (meta->tables [i].rows == 0) continue; *int32val++ = GUINT32_TO_LE (meta->tables [i].rows); } p = (unsigned char*)int32val; /* sort the tables that still need sorting */ table = &assembly->tables [MONO_TABLE_CONSTANT]; if (table->rows) qsort (table->values + MONO_CONSTANT_SIZE, table->rows, sizeof (guint32) * MONO_CONSTANT_SIZE, compare_constants); table = &assembly->tables [MONO_TABLE_METHODSEMANTICS]; if (table->rows) qsort (table->values + MONO_METHOD_SEMA_SIZE, table->rows, sizeof (guint32) * MONO_METHOD_SEMA_SIZE, compare_semantics); table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE]; if (table->rows) qsort (table->values + MONO_CUSTOM_ATTR_SIZE, table->rows, sizeof (guint32) * MONO_CUSTOM_ATTR_SIZE, compare_custom_attrs); table = &assembly->tables [MONO_TABLE_FIELDMARSHAL]; if (table->rows) qsort (table->values + MONO_FIELD_MARSHAL_SIZE, table->rows, sizeof (guint32) * MONO_FIELD_MARSHAL_SIZE, compare_field_marshal); table = &assembly->tables [MONO_TABLE_NESTEDCLASS]; if (table->rows) qsort (table->values + MONO_NESTED_CLASS_SIZE, table->rows, sizeof (guint32) * MONO_NESTED_CLASS_SIZE, compare_nested); /* Section 21.11 DeclSecurity in Partition II doesn't specify this to be sorted by MS implementation requires it */ table = &assembly->tables [MONO_TABLE_DECLSECURITY]; if (table->rows) qsort (table->values + MONO_DECL_SECURITY_SIZE, table->rows, sizeof (guint32) * MONO_DECL_SECURITY_SIZE, compare_declsecurity_attrs); table = &assembly->tables [MONO_TABLE_INTERFACEIMPL]; if (table->rows) qsort (table->values + MONO_INTERFACEIMPL_SIZE, table->rows, sizeof (guint32) * MONO_INTERFACEIMPL_SIZE, compare_interface_impl); /* compress the tables */ for (i = 0; i < MONO_TABLE_NUM; i++){ int row, col; guint32 *values; guint32 bitfield = meta->tables [i].size_bitfield; if (!meta->tables [i].rows) continue; if (assembly->tables [i].columns != mono_metadata_table_count (bitfield)) g_error ("col count mismatch in %d: %d %d", i, assembly->tables [i].columns, mono_metadata_table_count (bitfield)); meta->tables [i].base = (char*)p; for (row = 1; row <= meta->tables [i].rows; ++row) { values = assembly->tables [i].values + row * assembly->tables [i].columns; for (col = 0; col < assembly->tables [i].columns; ++col) { switch (mono_metadata_table_size (bitfield, col)) { case 1: *p++ = values [col]; break; case 2: *p++ = values [col] & 0xff; *p++ = (values [col] >> 8) & 0xff; break; case 4: *p++ = values [col] & 0xff; *p++ = (values [col] >> 8) & 0xff; *p++ = (values [col] >> 16) & 0xff; *p++ = (values [col] >> 24) & 0xff; break; default: g_assert_not_reached (); } } } g_assert ((p - (const unsigned char*)meta->tables [i].base) == (meta->tables [i].rows * meta->tables [i].row_size)); } g_assert (assembly->guid.offset + assembly->guid.index < meta_size); memcpy (meta->raw_metadata + assembly->sheap.offset, 
assembly->sheap.data, assembly->sheap.index); memcpy (meta->raw_metadata + assembly->us.offset, assembly->us.data, assembly->us.index); memcpy (meta->raw_metadata + assembly->blob.offset, assembly->blob.data, assembly->blob.index); memcpy (meta->raw_metadata + assembly->guid.offset, assembly->guid.data, assembly->guid.index); assembly->meta_size = assembly->guid.offset + assembly->guid.index; } /* * Some tables in metadata need to be sorted according to some criteria, but * when methods and fields are first created with reflection, they may be assigned a token * that doesn't correspond to the final token they will get assigned after the sorting. * ILGenerator.cs keeps a fixup table that maps the position of tokens in the IL code stream * with the reflection objects that represent them. Once all the tables are set up, the * reflection objects will contains the correct table index. fixup_method() will fixup the * tokens for the method with ILGenerator @ilgen. */ static void fixup_method (MonoReflectionILGen *ilgen, gpointer value, MonoDynamicImage *assembly) { guint32 code_idx = GPOINTER_TO_UINT (value); MonoReflectionILTokenInfo *iltoken; MonoReflectionFieldBuilder *field; MonoReflectionCtorBuilder *ctor; MonoReflectionMethodBuilder *method; MonoReflectionTypeBuilder *tb; MonoReflectionArrayMethod *am; guint32 i, idx = 0; unsigned char *target; for (i = 0; i < ilgen->num_token_fixups; ++i) { iltoken = (MonoReflectionILTokenInfo *)mono_array_addr_with_size (ilgen->token_fixups, sizeof (MonoReflectionILTokenInfo), i); target = (guchar*)assembly->code.data + code_idx + iltoken->code_pos; switch (target [3]) { case MONO_TABLE_FIELD: if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) { field = (MonoReflectionFieldBuilder *)iltoken->member; idx = field->table_idx; } else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) { MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field; idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->field_to_table_idx, f)); } else { g_assert_not_reached (); } break; case MONO_TABLE_METHOD: if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) { method = (MonoReflectionMethodBuilder *)iltoken->member; idx = method->table_idx; } else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) { ctor = (MonoReflectionCtorBuilder *)iltoken->member; idx = ctor->table_idx; } else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") || !strcmp (iltoken->member->vtable->klass->name, "MonoCMethod")) { MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method; idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m)); } else { g_assert_not_reached (); } break; case MONO_TABLE_TYPEDEF: if (strcmp (iltoken->member->vtable->klass->name, "TypeBuilder")) g_assert_not_reached (); tb = (MonoReflectionTypeBuilder *)iltoken->member; idx = tb->table_idx; break; case MONO_TABLE_MEMBERREF: if (!strcmp (iltoken->member->vtable->klass->name, "MonoArrayMethod")) { am = (MonoReflectionArrayMethod*)iltoken->member; idx = am->table_idx; } else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") || !strcmp (iltoken->member->vtable->klass->name, "MonoCMethod") || !strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod") || !strcmp (iltoken->member->vtable->klass->name, "MonoGenericCMethod")) { MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method; g_assert (m->klass->generic_class || m->klass->generic_container); continue; } else if (!strcmp 
(iltoken->member->vtable->klass->name, "FieldBuilder")) { continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) { MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field; g_assert (is_field_on_inst (f)); continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder") || !strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) { continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "FieldOnTypeBuilderInst")) { continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) { continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorOnTypeBuilderInst")) { continue; } else { g_assert_not_reached (); } break; case MONO_TABLE_METHODSPEC: if (!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod")) { MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method; g_assert (mono_method_signature (m)->generic_param_count); continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) { continue; } else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) { continue; } else { g_assert_not_reached (); } break; default: g_error ("got unexpected table 0x%02x in fixup", target [3]); } target [0] = idx & 0xff; target [1] = (idx >> 8) & 0xff; target [2] = (idx >> 16) & 0xff; } } /* * fixup_cattrs: * * The CUSTOM_ATTRIBUTE table might contain METHODDEF tokens whose final * value is not known when the table is emitted. */ static void fixup_cattrs (MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint32 type, i, idx, token; MonoObject *ctor; table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE]; for (i = 0; i < table->rows; ++i) { values = table->values + ((i + 1) * MONO_CUSTOM_ATTR_SIZE); type = values [MONO_CUSTOM_ATTR_TYPE]; if ((type & MONO_CUSTOM_ATTR_TYPE_MASK) == MONO_CUSTOM_ATTR_TYPE_METHODDEF) { idx = type >> MONO_CUSTOM_ATTR_TYPE_BITS; token = mono_metadata_make_token (MONO_TABLE_METHOD, idx); ctor = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token)); g_assert (ctor); if (!strcmp (ctor->vtable->klass->name, "MonoCMethod")) { MonoMethod *m = ((MonoReflectionMethod*)ctor)->method; idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m)); values [MONO_CUSTOM_ATTR_TYPE] = (idx << MONO_CUSTOM_ATTR_TYPE_BITS) | MONO_CUSTOM_ATTR_TYPE_METHODDEF; } } } } static void assembly_add_resource_manifest (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc, guint32 implementation) { MonoDynamicTable *table; guint32 *values; table = &assembly->tables [MONO_TABLE_MANIFESTRESOURCE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_MANIFEST_SIZE; values [MONO_MANIFEST_OFFSET] = rsrc->offset; values [MONO_MANIFEST_FLAGS] = rsrc->attrs; values [MONO_MANIFEST_NAME] = string_heap_insert_mstring (&assembly->sheap, rsrc->name); values [MONO_MANIFEST_IMPLEMENTATION] = implementation; table->next_idx++; } static void assembly_add_resource (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc) { MonoDynamicTable *table; guint32 *values; char blob_size [6]; guchar hash [20]; char *b = blob_size; char *name, *sname; guint32 idx, offset; if (rsrc->filename) { name = mono_string_to_utf8 (rsrc->filename); sname = g_path_get_basename (name); table = &assembly->tables [MONO_TABLE_FILE]; table->rows++; alloc_table (table, table->rows); values = 
table->values + table->next_idx * MONO_FILE_SIZE; values [MONO_FILE_FLAGS] = FILE_CONTAINS_NO_METADATA; values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, sname); g_free (sname); mono_sha1_get_digest_from_file (name, hash); mono_metadata_encode_value (20, b, &b); values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size); mono_image_add_stream_data (&assembly->blob, (char*)hash, 20); g_free (name); idx = table->next_idx++; rsrc->offset = 0; idx = MONO_IMPLEMENTATION_FILE | (idx << MONO_IMPLEMENTATION_BITS); } else { char sizebuf [4]; char *data; guint len; if (rsrc->data) { data = mono_array_addr (rsrc->data, char, 0); len = mono_array_length (rsrc->data); } else { data = NULL; len = 0; } offset = len; sizebuf [0] = offset; sizebuf [1] = offset >> 8; sizebuf [2] = offset >> 16; sizebuf [3] = offset >> 24; rsrc->offset = mono_image_add_stream_data (&assembly->resources, sizebuf, 4); mono_image_add_stream_data (&assembly->resources, data, len); if (!mb->is_main) /* * The entry should be emitted into the MANIFESTRESOURCE table of * the main module, but that needs to reference the FILE table * which isn't emitted yet. */ return; else idx = 0; } assembly_add_resource_manifest (mb, assembly, rsrc, idx); } static void set_version_from_string (MonoString *version, guint32 *values) { gchar *ver, *p, *str; guint32 i; values [MONO_ASSEMBLY_MAJOR_VERSION] = 0; values [MONO_ASSEMBLY_MINOR_VERSION] = 0; values [MONO_ASSEMBLY_REV_NUMBER] = 0; values [MONO_ASSEMBLY_BUILD_NUMBER] = 0; if (!version) return; ver = str = mono_string_to_utf8 (version); for (i = 0; i < 4; ++i) { values [MONO_ASSEMBLY_MAJOR_VERSION + i] = strtol (ver, &p, 10); switch (*p) { case '.': p++; break; case '*': /* handle Revision and Build */ p++; break; } ver = p; } g_free (str); } static guint32 load_public_key (MonoArray *pkey, MonoDynamicImage *assembly) { gsize len; guint32 token = 0; char blob_size [6]; char *b = blob_size; if (!pkey) return token; len = mono_array_length (pkey); mono_metadata_encode_value (len, b, &b); token = mono_image_add_stream_data (&assembly->blob, blob_size, b - blob_size); mono_image_add_stream_data (&assembly->blob, mono_array_addr (pkey, char, 0), len); assembly->public_key = g_malloc (len); memcpy (assembly->public_key, mono_array_addr (pkey, char, 0), len); assembly->public_key_len = len; /* Special case: check for ECMA key (16 bytes) */ if ((len == MONO_ECMA_KEY_LENGTH) && mono_is_ecma_key (mono_array_addr (pkey, char, 0), len)) { /* In this case we must reserve 128 bytes (1024 bits) for the signature */ assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; } else if (len >= MONO_PUBLIC_KEY_HEADER_LENGTH + MONO_MINIMUM_PUBLIC_KEY_LENGTH) { /* minimum key size (in 2.0) is 384 bits */ assembly->strong_name_size = len - MONO_PUBLIC_KEY_HEADER_LENGTH; } else { /* FIXME - verifier */ g_warning ("Invalid public key length: %d bits (total: %d)", (int)MONO_PUBLIC_KEY_BIT_SIZE (len), (int)len); assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; /* to be safe */ } assembly->strong_name = g_malloc0 (assembly->strong_name_size); return token; } static void mono_image_emit_manifest (MonoReflectionModuleBuilder *moduleb) { MonoDynamicTable *table; MonoDynamicImage *assembly; MonoReflectionAssemblyBuilder *assemblyb; MonoDomain *domain; guint32 *values; int i; guint32 module_index; assemblyb = moduleb->assemblyb; assembly = moduleb->dynamic_image; domain = mono_object_domain (assemblyb); /* Emit ASSEMBLY table */ table = &assembly->tables 
[MONO_TABLE_ASSEMBLY]; alloc_table (table, 1); values = table->values + MONO_ASSEMBLY_SIZE; values [MONO_ASSEMBLY_HASH_ALG] = assemblyb->algid? assemblyb->algid: ASSEMBLY_HASH_SHA1; values [MONO_ASSEMBLY_NAME] = string_heap_insert_mstring (&assembly->sheap, assemblyb->name); if (assemblyb->culture) { values [MONO_ASSEMBLY_CULTURE] = string_heap_insert_mstring (&assembly->sheap, assemblyb->culture); } else { values [MONO_ASSEMBLY_CULTURE] = string_heap_insert (&assembly->sheap, ""); } values [MONO_ASSEMBLY_PUBLIC_KEY] = load_public_key (assemblyb->public_key, assembly); values [MONO_ASSEMBLY_FLAGS] = assemblyb->flags; set_version_from_string (assemblyb->version, values); /* Emit FILE + EXPORTED_TYPE table */ module_index = 0; for (i = 0; i < mono_array_length (assemblyb->modules); ++i) { int j; MonoReflectionModuleBuilder *file_module = mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i); if (file_module != moduleb) { mono_image_fill_file_table (domain, (MonoReflectionModule*)file_module, assembly); module_index ++; if (file_module->types) { for (j = 0; j < file_module->num_types; ++j) { MonoReflectionTypeBuilder *tb = mono_array_get (file_module->types, MonoReflectionTypeBuilder*, j); mono_image_fill_export_table (domain, tb, module_index, 0, assembly); } } } } if (assemblyb->loaded_modules) { for (i = 0; i < mono_array_length (assemblyb->loaded_modules); ++i) { MonoReflectionModule *file_module = mono_array_get (assemblyb->loaded_modules, MonoReflectionModule*, i); mono_image_fill_file_table (domain, file_module, assembly); module_index ++; mono_image_fill_export_table_from_module (domain, file_module, module_index, assembly); } } if (assemblyb->type_forwarders) mono_image_fill_export_table_from_type_forwarders (assemblyb, assembly); /* Emit MANIFESTRESOURCE table */ module_index = 0; for (i = 0; i < mono_array_length (assemblyb->modules); ++i) { int j; MonoReflectionModuleBuilder *file_module = mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i); /* The table for the main module is emitted later */ if (file_module != moduleb) { module_index ++; if (file_module->resources) { int len = mono_array_length (file_module->resources); for (j = 0; j < len; ++j) { MonoReflectionResource* res = (MonoReflectionResource*)mono_array_addr (file_module->resources, MonoReflectionResource, j); assembly_add_resource_manifest (file_module, assembly, res, MONO_IMPLEMENTATION_FILE | (module_index << MONO_IMPLEMENTATION_BITS)); } } } } } #ifndef DISABLE_REFLECTION_EMIT_SAVE /* * mono_image_build_metadata() will fill the info in all the needed metadata tables * for the modulebuilder @moduleb. * At the end of the process, method and field tokens are fixed up and the * on-disk compressed metadata representation is created. */ void mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb) { MonoDynamicTable *table; MonoDynamicImage *assembly; MonoReflectionAssemblyBuilder *assemblyb; MonoDomain *domain; GPtrArray *types; guint32 *values; int i, j; assemblyb = moduleb->assemblyb; assembly = moduleb->dynamic_image; domain = mono_object_domain (assemblyb); if (assembly->text_rva) return; assembly->text_rva = START_TEXT_RVA; if (moduleb->is_main) { mono_image_emit_manifest (moduleb); } table = &assembly->tables [MONO_TABLE_TYPEDEF]; table->rows = 1; /* .<Module> */ table->next_idx++; alloc_table (table, table->rows); /* * Set the first entry. 
*/ values = table->values + table->columns; values [MONO_TYPEDEF_FLAGS] = 0; values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, "<Module>") ; values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, "") ; values [MONO_TYPEDEF_EXTENDS] = 0; values [MONO_TYPEDEF_FIELD_LIST] = 1; values [MONO_TYPEDEF_METHOD_LIST] = 1; /* * handle global methods * FIXME: test what to do when global methods are defined in multiple modules. */ if (moduleb->global_methods) { table = &assembly->tables [MONO_TABLE_METHOD]; table->rows += mono_array_length (moduleb->global_methods); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) mono_image_get_method_info ( mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i), assembly); } if (moduleb->global_fields) { table = &assembly->tables [MONO_TABLE_FIELD]; table->rows += mono_array_length (moduleb->global_fields); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (moduleb->global_fields); ++i) mono_image_get_field_info ( mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i), assembly); } table = &assembly->tables [MONO_TABLE_MODULE]; alloc_table (table, 1); mono_image_fill_module_table (domain, moduleb, assembly); /* Collect all types into a list sorted by their table_idx */ types = g_ptr_array_new (); if (moduleb->types) for (i = 0; i < moduleb->num_types; ++i) { MonoReflectionTypeBuilder *type = mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i); collect_types (types, type); } g_ptr_array_sort (types, (GCompareFunc)compare_types_by_table_idx); table = &assembly->tables [MONO_TABLE_TYPEDEF]; table->rows += types->len; alloc_table (table, table->rows); /* * Emit type names + namespaces at one place inside the string heap, * so load_class_names () needs to touch fewer pages. */ for (i = 0; i < types->len; ++i) { MonoReflectionTypeBuilder *tb = g_ptr_array_index (types, i); string_heap_insert_mstring (&assembly->sheap, tb->nspace); } for (i = 0; i < types->len; ++i) { MonoReflectionTypeBuilder *tb = g_ptr_array_index (types, i); string_heap_insert_mstring (&assembly->sheap, tb->name); } for (i = 0; i < types->len; ++i) { MonoReflectionTypeBuilder *type = g_ptr_array_index (types, i); mono_image_get_type_info (domain, type, assembly); } /* * table->rows is already set above and in mono_image_fill_module_table. */ /* add all the custom attributes at the end, once all the indexes are stable */ mono_image_add_cattrs (assembly, 1, MONO_CUSTOM_ATTR_ASSEMBLY, assemblyb->cattrs); /* CAS assembly permissions */ if (assemblyb->permissions_minimum) mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_minimum); if (assemblyb->permissions_optional) mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_optional); if (assemblyb->permissions_refused) mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_refused); module_add_cattrs (assembly, moduleb); /* fixup tokens */ mono_g_hash_table_foreach (assembly->token_fixups, (GHFunc)fixup_method, assembly); /* Create the MethodImpl table. We do this after emitting all methods so we already know * the final tokens and don't need another fixup pass. 
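 *
 * For reference (ECMA-335 II.22.27), each MethodImpl row pairs an override
 * with the method it implements, e.g. for an explicit interface
 * implementation of IFoo.Bar (tokens illustrative):
 *
 *   MONO_METHODIMPL_CLASS       = TypeDef row of the implementing type
 *   MONO_METHODIMPL_BODY        = MethodDefOrRef token of the private body
 *   MONO_METHODIMPL_DECLARATION = MethodDefOrRef token of IFoo.Bar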
*/ if (moduleb->global_methods) { for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) { MonoReflectionMethodBuilder *mb = mono_array_get ( moduleb->global_methods, MonoReflectionMethodBuilder*, i); mono_image_add_methodimpl (assembly, mb); } } for (i = 0; i < types->len; ++i) { MonoReflectionTypeBuilder *type = g_ptr_array_index (types, i); if (type->methods) { for (j = 0; j < type->num_methods; ++j) { MonoReflectionMethodBuilder *mb = mono_array_get ( type->methods, MonoReflectionMethodBuilder*, j); mono_image_add_methodimpl (assembly, mb); } } } g_ptr_array_free (types, TRUE); fixup_cattrs (assembly); } #else /* DISABLE_REFLECTION_EMIT_SAVE */ void mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb) { g_error ("This mono runtime was configured with --enable-minimal=reflection_emit_save, so saving of dynamic assemblies is not supported."); } #endif /* DISABLE_REFLECTION_EMIT_SAVE */ typedef struct { guint32 import_lookup_table; guint32 timestamp; guint32 forwarder; guint32 name_rva; guint32 import_address_table_rva; } MonoIDT; typedef struct { guint32 name_rva; guint32 flags; } MonoILT; #ifndef DISABLE_REFLECTION_EMIT /* * mono_image_insert_string: * @module: module builder object * @str: a string * * Insert @str into the user string stream of @module. */ guint32 mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str) { MonoDynamicImage *assembly; guint32 idx; char buf [16]; char *b = buf; MONO_ARCH_SAVE_REGS; if (!module->dynamic_image) mono_image_module_basic_init (module); assembly = module->dynamic_image; if (assembly->save) { mono_metadata_encode_value (1 | (str->length * 2), b, &b); idx = mono_image_add_stream_data (&assembly->us, buf, b-buf); #if G_BYTE_ORDER != G_LITTLE_ENDIAN { char *swapped = g_malloc (2 * mono_string_length (str)); const char *p = (const char*)mono_string_chars (str); swap_with_size (swapped, p, 2, mono_string_length (str)); mono_image_add_stream_data (&assembly->us, swapped, str->length * 2); g_free (swapped); } #else mono_image_add_stream_data (&assembly->us, (const char*)mono_string_chars (str), str->length * 2); #endif mono_image_add_stream_data (&assembly->us, "", 1); } else { idx = assembly->us.index ++; } mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (MONO_TOKEN_STRING | idx), str); return MONO_TOKEN_STRING | idx; } guint32 mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types) { MonoClass *klass; guint32 token = 0; klass = obj->vtable->klass; if (strcmp (klass->name, "MonoMethod") == 0) { MonoMethod *method = ((MonoReflectionMethod *)obj)->method; MonoMethodSignature *sig, *old; guint32 sig_token, parent; int nargs, i; g_assert (opt_param_types && (mono_method_signature (method)->sentinelpos >= 0)); nargs = mono_array_length (opt_param_types); old = mono_method_signature (method); sig = mono_metadata_signature_alloc ( &assembly->image, old->param_count + nargs); sig->hasthis = old->hasthis; sig->explicit_this = old->explicit_this; sig->call_convention = old->call_convention; sig->generic_param_count = old->generic_param_count; sig->param_count = old->param_count + nargs; sig->sentinelpos = old->param_count; sig->ret = old->ret; for (i = 0; i < old->param_count; i++) sig->params [i] = old->params [i]; for (i = 0; i < nargs; i++) { MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i); sig->params [old->param_count + i] = mono_reflection_type_get_handle (rt); } parent = mono_image_typedef_or_ref (assembly, 
&method->klass->byval_arg); g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_MEMBERREF_PARENT_TYPEREF); parent >>= MONO_TYPEDEFORREF_BITS; parent <<= MONO_MEMBERREF_PARENT_BITS; parent |= MONO_MEMBERREF_PARENT_TYPEREF; sig_token = method_encode_signature (assembly, sig); token = mono_image_get_varargs_method_token (assembly, parent, method->name, sig_token); } else if (strcmp (klass->name, "MethodBuilder") == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj; ReflectionMethodBuilder rmb; guint32 parent, sig; char *name; reflection_methodbuilder_from_method_builder (&rmb, mb); rmb.opt_types = opt_param_types; sig = method_builder_encode_signature (assembly, &rmb); parent = mono_image_create_token (assembly, obj, TRUE, TRUE); g_assert (mono_metadata_token_table (parent) == MONO_TABLE_METHOD); parent = mono_metadata_token_index (parent) << MONO_MEMBERREF_PARENT_BITS; parent |= MONO_MEMBERREF_PARENT_METHODDEF; name = mono_string_to_utf8 (rmb.name); token = mono_image_get_varargs_method_token ( assembly, parent, name, sig); g_free (name); } else { g_error ("requested method token for %s\n", klass->name); } return token; } /* * mono_image_create_token: * @assembly: a dynamic assembly * @obj: * @register_token: Whenever to register the token in the assembly->tokens hash. * * Get a token to insert in the IL code stream for the given MemberInfo. * The metadata emission routines need to pass FALSE as REGISTER_TOKEN, since by that time, * the table_idx-es were recomputed, so registering the token would overwrite an existing * entry. */ guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj, gboolean create_methodspec, gboolean register_token) { MonoClass *klass; guint32 token = 0; klass = obj->vtable->klass; /* Check for user defined reflection objects */ /* TypeDelegator is the only corlib type which doesn't look like a MonoReflectionType */ if (klass->image != mono_defaults.corlib || (strcmp (klass->name, "TypeDelegator") == 0)) mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported")); \ if (strcmp (klass->name, "MethodBuilder") == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj; MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type; if (tb->module->dynamic_image == assembly && !tb->generic_params && !mb->generic_params) token = mb->table_idx | MONO_TOKEN_METHOD_DEF; else token = mono_image_get_methodbuilder_token (assembly, mb, create_methodspec); /*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/ } else if (strcmp (klass->name, "ConstructorBuilder") == 0) { MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj; MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type; if (tb->module->dynamic_image == assembly && !tb->generic_params) token = mb->table_idx | MONO_TOKEN_METHOD_DEF; else token = mono_image_get_ctorbuilder_token (assembly, mb); /*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/ } else if (strcmp (klass->name, "FieldBuilder") == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj; MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)fb->typeb; if (tb->generic_params) { token = mono_image_get_generic_field_token (assembly, fb); } else { token = fb->table_idx | MONO_TOKEN_FIELD_DEF; } } else if (strcmp (klass->name, "TypeBuilder") == 0) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj; token = tb->table_idx 
| MONO_TOKEN_TYPE_DEF; } else if (strcmp (klass->name, "MonoType") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); MonoClass *mc = mono_class_from_mono_type (type); token = mono_metadata_token_from_dor ( mono_image_typedef_or_ref_full (assembly, type, mc->generic_container == NULL)); } else if (strcmp (klass->name, "GenericTypeParameterBuilder") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); token = mono_metadata_token_from_dor ( mono_image_typedef_or_ref (assembly, type)); } else if (strcmp (klass->name, "MonoGenericClass") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); token = mono_metadata_token_from_dor ( mono_image_typedef_or_ref (assembly, type)); } else if (strcmp (klass->name, "MonoCMethod") == 0 || strcmp (klass->name, "MonoMethod") == 0 || strcmp (klass->name, "MonoGenericMethod") == 0 || strcmp (klass->name, "MonoGenericCMethod") == 0) { MonoReflectionMethod *m = (MonoReflectionMethod *)obj; if (m->method->is_inflated) { if (create_methodspec) token = mono_image_get_methodspec_token (assembly, m->method); else token = mono_image_get_inflated_method_token (assembly, m->method); } else if ((m->method->klass->image == &assembly->image) && !m->method->klass->generic_class) { static guint32 method_table_idx = 0xffffff; if (m->method->klass->wastypebuilder) { /* we use the same token as the one that was assigned * to the Methodbuilder. * FIXME: do the equivalent for Fields. */ token = m->method->token; } else { /* * Each token should have a unique index, but the indexes are * assigned by managed code, so we don't know about them. An * easy solution is to count backwards... */ method_table_idx --; token = MONO_TOKEN_METHOD_DEF | method_table_idx; } } else { token = mono_image_get_methodref_token (assembly, m->method, create_methodspec); } /*g_print ("got token 0x%08x for %s\n", token, m->method->name);*/ } else if (strcmp (klass->name, "MonoField") == 0) { MonoReflectionField *f = (MonoReflectionField *)obj; if ((f->field->parent->image == &assembly->image) && !is_field_on_inst (f->field)) { static guint32 field_table_idx = 0xffffff; field_table_idx --; token = MONO_TOKEN_FIELD_DEF | field_table_idx; } else { token = mono_image_get_fieldref_token (assembly, f); } /*g_print ("got token 0x%08x for %s\n", token, f->field->name);*/ } else if (strcmp (klass->name, "MonoArrayMethod") == 0) { MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod *)obj; token = mono_image_get_array_token (assembly, m); } else if (strcmp (klass->name, "SignatureHelper") == 0) { MonoReflectionSigHelper *s = (MonoReflectionSigHelper*)obj; token = MONO_TOKEN_SIGNATURE | mono_image_get_sighelper_token (assembly, s); } else if (strcmp (klass->name, "EnumBuilder") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); token = mono_metadata_token_from_dor ( mono_image_typedef_or_ref (assembly, type)); } else if (strcmp (klass->name, "FieldOnTypeBuilderInst") == 0) { MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj; token = mono_image_get_field_on_inst_token (assembly, f); } else if (strcmp (klass->name, "ConstructorOnTypeBuilderInst") == 0) { MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj; token = mono_image_get_ctor_on_inst_token (assembly, c, create_methodspec); } else if (strcmp (klass->name, "MethodOnTypeBuilderInst") == 0) { MonoReflectionMethodOnTypeBuilderInst *m = 
(MonoReflectionMethodOnTypeBuilderInst*)obj; token = mono_image_get_method_on_inst_token (assembly, m, create_methodspec); } else if (is_sre_array (klass) || is_sre_byref (klass) || is_sre_pointer (klass)) { MonoReflectionType *type = (MonoReflectionType *)obj; token = mono_metadata_token_from_dor ( mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (type))); } else { g_error ("requested token for %s\n", klass->name); } if (register_token) mono_image_register_token (assembly, token, obj); return token; } /* * mono_image_register_token: * * Register the TOKEN->OBJ mapping in the mapping table in ASSEMBLY. This is required for * the Module.ResolveXXXToken () methods to work. */ void mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj) { MonoObject *prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token)); if (prev) { /* There could be multiple MethodInfo objects with the same token */ //g_assert (prev == obj); } else { mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj); } } static MonoDynamicImage* create_dynamic_mono_image (MonoDynamicAssembly *assembly, char *assembly_name, char *module_name) { static const guchar entrycode [16] = {0xff, 0x25, 0}; MonoDynamicImage *image; int i; const char *version; if (!strcmp (mono_get_runtime_info ()->framework_version, "2.1")) version = "v2.0.50727"; /* HACK: SL 2 enforces the .net 2 metadata version */ else version = mono_get_runtime_info ()->runtime_version; #if HAVE_BOEHM_GC image = GC_MALLOC (sizeof (MonoDynamicImage)); #else image = g_new0 (MonoDynamicImage, 1); #endif mono_profiler_module_event (&image->image, MONO_PROFILE_START_LOAD); /*g_print ("created image %p\n", image);*/ /* keep in sync with image.c */ image->image.name = assembly_name; image->image.assembly_name = image->image.name; /* they may be different */ image->image.module_name = module_name; image->image.version = g_strdup (version); image->image.md_version_major = 1; image->image.md_version_minor = 1; image->image.dynamic = TRUE; image->image.references = g_new0 (MonoAssembly*, 1); image->image.references [0] = NULL; mono_image_init (&image->image); image->token_fixups = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC); image->method_to_table_idx = g_hash_table_new (NULL, NULL); image->field_to_table_idx = g_hash_table_new (NULL, NULL); image->method_aux_hash = g_hash_table_new (NULL, NULL); image->handleref = g_hash_table_new (NULL, NULL); image->tokens = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC); image->generic_def_objects = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC); image->methodspec = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC); image->typespec = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal); image->typeref = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal); image->blob_cache = g_hash_table_new ((GHashFunc)mono_blob_entry_hash, (GCompareFunc)mono_blob_entry_equal); image->gen_params = g_ptr_array_new (); /*g_print ("string heap create for image %p (%s)\n", image, module_name);*/ string_heap_init (&image->sheap); mono_image_add_stream_data (&image->us, "", 1); add_to_blob_cached (image, (char*) "", 1, NULL, 0); /* import tables... 
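 * The code stream laid out just below is, in order: the 16-byte entry
 * point stub, two IAT slots, two IDT entries, the 2-byte hint followed by
 * the "_CorExeMain" and "mscoree.dll" strings, two ILT entries and the
 * CLI header. The *_offset fields recorded here are only placeholders;
 * they are patched with real RVAs later in mono_image_create_pefile ().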
*/ mono_image_add_stream_data (&image->code, (char*)entrycode, sizeof (entrycode)); image->iat_offset = mono_image_add_stream_zero (&image->code, 8); /* two IAT entries */ image->idt_offset = mono_image_add_stream_zero (&image->code, 2 * sizeof (MonoIDT)); /* two IDT entries */ image->imp_names_offset = mono_image_add_stream_zero (&image->code, 2); /* flags for name entry */ mono_image_add_stream_data (&image->code, "_CorExeMain", 12); mono_image_add_stream_data (&image->code, "mscoree.dll", 12); image->ilt_offset = mono_image_add_stream_zero (&image->code, 8); /* two ILT entries */ stream_data_align (&image->code); image->cli_header_offset = mono_image_add_stream_zero (&image->code, sizeof (MonoCLIHeader)); for (i=0; i < MONO_TABLE_NUM; ++i) { image->tables [i].next_idx = 1; image->tables [i].columns = table_sizes [i]; } image->image.assembly = (MonoAssembly*)assembly; image->run = assembly->run; image->save = assembly->save; image->pe_kind = 0x1; /* ILOnly */ image->machine = 0x14c; /* I386 */ mono_profiler_module_loaded (&image->image, MONO_PROFILE_OK); return image; } #endif static void free_blob_cache_entry (gpointer key, gpointer val, gpointer user_data) { g_free (key); } void mono_dynamic_image_free (MonoDynamicImage *image) { MonoDynamicImage *di = image; GList *list; int i; if (di->methodspec) mono_g_hash_table_destroy (di->methodspec); if (di->typespec) g_hash_table_destroy (di->typespec); if (di->typeref) g_hash_table_destroy (di->typeref); if (di->handleref) g_hash_table_destroy (di->handleref); if (di->tokens) mono_g_hash_table_destroy (di->tokens); if (di->generic_def_objects) mono_g_hash_table_destroy (di->generic_def_objects); if (di->blob_cache) { g_hash_table_foreach (di->blob_cache, free_blob_cache_entry, NULL); g_hash_table_destroy (di->blob_cache); } if (di->standalonesig_cache) g_hash_table_destroy (di->standalonesig_cache); for (list = di->array_methods; list; list = list->next) { ArrayMethod *am = (ArrayMethod *)list->data; g_free (am->sig); g_free (am->name); g_free (am); } g_list_free (di->array_methods); if (di->gen_params) { for (i = 0; i < di->gen_params->len; i++) { GenericParamTableEntry *entry = g_ptr_array_index (di->gen_params, i); if (entry->gparam->type.type) { MonoGenericParam *param = entry->gparam->type.type->data.generic_param; g_free ((char*)mono_generic_param_info (param)->name); g_free (param); } g_free (entry); } g_ptr_array_free (di->gen_params, TRUE); } if (di->token_fixups) mono_g_hash_table_destroy (di->token_fixups); if (di->method_to_table_idx) g_hash_table_destroy (di->method_to_table_idx); if (di->field_to_table_idx) g_hash_table_destroy (di->field_to_table_idx); if (di->method_aux_hash) g_hash_table_destroy (di->method_aux_hash); g_free (di->strong_name); g_free (di->win32_res); if (di->public_key) g_free (di->public_key); /*g_print ("string heap destroy for image %p\n", di);*/ mono_dynamic_stream_reset (&di->sheap); mono_dynamic_stream_reset (&di->code); mono_dynamic_stream_reset (&di->resources); mono_dynamic_stream_reset (&di->us); mono_dynamic_stream_reset (&di->blob); mono_dynamic_stream_reset (&di->tstream); mono_dynamic_stream_reset (&di->guid); for (i = 0; i < MONO_TABLE_NUM; ++i) { g_free (di->tables [i].values); } } #ifndef DISABLE_REFLECTION_EMIT /* * mono_image_basic_init: * @assembly: an assembly builder object * * Create the MonoImage that represents the assembly builder and setup some * of the helper hash table and the basic metadata streams. 
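 *
 * The function is idempotent: it returns immediately when
 * assemblyb->dynamic_assembly is already set, so it can safely be called
 * from several entry points (mono_image_create_pefile () relies on this).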
*/ void mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb) { MonoDynamicAssembly *assembly; MonoDynamicImage *image; MonoDomain *domain = mono_object_domain (assemblyb); MONO_ARCH_SAVE_REGS; if (assemblyb->dynamic_assembly) return; #if HAVE_BOEHM_GC assembly = assemblyb->dynamic_assembly = GC_MALLOC (sizeof (MonoDynamicAssembly)); #else assembly = assemblyb->dynamic_assembly = g_new0 (MonoDynamicAssembly, 1); #endif mono_profiler_assembly_event (&assembly->assembly, MONO_PROFILE_START_LOAD); assembly->assembly.ref_count = 1; assembly->assembly.dynamic = TRUE; assembly->assembly.corlib_internal = assemblyb->corlib_internal; assemblyb->assembly.assembly = (MonoAssembly*)assembly; assembly->assembly.basedir = mono_string_to_utf8 (assemblyb->dir); if (assemblyb->culture) assembly->assembly.aname.culture = mono_string_to_utf8 (assemblyb->culture); else assembly->assembly.aname.culture = g_strdup (""); if (assemblyb->version) { char *vstr = mono_string_to_utf8 (assemblyb->version); char **version = g_strsplit (vstr, ".", 4); char **parts = version; assembly->assembly.aname.major = atoi (*parts++); assembly->assembly.aname.minor = atoi (*parts++); assembly->assembly.aname.build = *parts != NULL ? atoi (*parts++) : 0; assembly->assembly.aname.revision = *parts != NULL ? atoi (*parts) : 0; g_strfreev (version); g_free (vstr); } else { assembly->assembly.aname.major = 0; assembly->assembly.aname.minor = 0; assembly->assembly.aname.build = 0; assembly->assembly.aname.revision = 0; } assembly->run = assemblyb->access != 2; assembly->save = assemblyb->access != 1; assembly->domain = domain; image = create_dynamic_mono_image (assembly, mono_string_to_utf8 (assemblyb->name), g_strdup ("RefEmit_YouForgotToDefineAModule")); image->initial_image = TRUE; assembly->assembly.aname.name = image->image.name; assembly->assembly.image = &image->image; if (assemblyb->pktoken && assemblyb->pktoken->max_length) { /* -1 to correct for the trailing NULL byte */ if (assemblyb->pktoken->max_length != MONO_PUBLIC_KEY_TOKEN_LENGTH - 1) { g_error ("Public key token length invalid for assembly %s: %i", assembly->assembly.aname.name, assemblyb->pktoken->max_length); } memcpy (&assembly->assembly.aname.public_key_token, mono_array_addr (assemblyb->pktoken, guint8, 0), assemblyb->pktoken->max_length); } mono_domain_assemblies_lock (domain); domain->domain_assemblies = g_slist_prepend (domain->domain_assemblies, assembly); mono_domain_assemblies_unlock (domain); register_assembly (mono_object_domain (assemblyb), &assemblyb->assembly, &assembly->assembly); mono_profiler_assembly_loaded (&assembly->assembly, MONO_PROFILE_OK); mono_assembly_invoke_load_hook ((MonoAssembly*)assembly); } #endif /* !DISABLE_REFLECTION_EMIT */ #ifndef DISABLE_REFLECTION_EMIT_SAVE static int calc_section_size (MonoDynamicImage *assembly) { int nsections = 0; /* alignment constraints */ mono_image_add_stream_zero (&assembly->code, 4 - (assembly->code.index % 4)); g_assert ((assembly->code.index % 4) == 0); assembly->meta_size += 3; assembly->meta_size &= ~3; mono_image_add_stream_zero (&assembly->resources, 4 - (assembly->resources.index % 4)); g_assert ((assembly->resources.index % 4) == 0); assembly->sections [MONO_SECTION_TEXT].size = assembly->meta_size + assembly->code.index + assembly->resources.index + assembly->strong_name_size; assembly->sections [MONO_SECTION_TEXT].attrs = SECT_FLAGS_HAS_CODE | SECT_FLAGS_MEM_EXECUTE | SECT_FLAGS_MEM_READ; nsections++; if (assembly->win32_res) { guint32 res_size = (assembly->win32_res_size + 3) & 
~3; assembly->sections [MONO_SECTION_RSRC].size = res_size; assembly->sections [MONO_SECTION_RSRC].attrs = SECT_FLAGS_HAS_INITIALIZED_DATA | SECT_FLAGS_MEM_READ; nsections++; } assembly->sections [MONO_SECTION_RELOC].size = 12; assembly->sections [MONO_SECTION_RELOC].attrs = SECT_FLAGS_MEM_READ | SECT_FLAGS_MEM_DISCARDABLE | SECT_FLAGS_HAS_INITIALIZED_DATA; nsections++; return nsections; } typedef struct { guint32 id; guint32 offset; GSList *children; MonoReflectionWin32Resource *win32_res; /* Only for leaf nodes */ } ResTreeNode; static int resource_tree_compare_by_id (gconstpointer a, gconstpointer b) { ResTreeNode *t1 = (ResTreeNode*)a; ResTreeNode *t2 = (ResTreeNode*)b; return t1->id - t2->id; } /* * resource_tree_create: * * Organize the resources into a resource tree. */ static ResTreeNode * resource_tree_create (MonoArray *win32_resources) { ResTreeNode *tree, *res_node, *type_node, *lang_node; GSList *l; int i; tree = g_new0 (ResTreeNode, 1); for (i = 0; i < mono_array_length (win32_resources); ++i) { MonoReflectionWin32Resource *win32_res = (MonoReflectionWin32Resource*)mono_array_addr (win32_resources, MonoReflectionWin32Resource, i); /* Create node */ /* FIXME: BUG: this stores managed references in unmanaged memory */ lang_node = g_new0 (ResTreeNode, 1); lang_node->id = win32_res->lang_id; lang_node->win32_res = win32_res; /* Create type node if neccesary */ type_node = NULL; for (l = tree->children; l; l = l->next) if (((ResTreeNode*)(l->data))->id == win32_res->res_type) { type_node = (ResTreeNode*)l->data; break; } if (!type_node) { type_node = g_new0 (ResTreeNode, 1); type_node->id = win32_res->res_type; /* * The resource types have to be sorted otherwise * Windows Explorer can't display the version information. */ tree->children = g_slist_insert_sorted (tree->children, type_node, resource_tree_compare_by_id); } /* Create res node if neccesary */ res_node = NULL; for (l = type_node->children; l; l = l->next) if (((ResTreeNode*)(l->data))->id == win32_res->res_id) { res_node = (ResTreeNode*)l->data; break; } if (!res_node) { res_node = g_new0 (ResTreeNode, 1); res_node->id = win32_res->res_id; type_node->children = g_slist_append (type_node->children, res_node); } res_node->children = g_slist_append (res_node->children, lang_node); } return tree; } /* * resource_tree_encode: * * Encode the resource tree into the format used in the PE file. 
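 *
 * Each directory node is written as an IMAGE_RESOURCE_DIRECTORY header
 * followed by its directory entries; leaf nodes become
 * IMAGE_RESOURCE_DATA_ENTRY records followed by the raw resource data.
 * BEGIN is the start of the output buffer (used to compute offsets), P is
 * the current write position and *ENDBUF receives the final position.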
*/ static void resource_tree_encode (ResTreeNode *node, char *begin, char *p, char **endbuf) { char *entries; MonoPEResourceDir dir; MonoPEResourceDirEntry dir_entry; MonoPEResourceDataEntry data_entry; GSList *l; guint32 res_id_entries; /* * For the format of the resource directory, see the article * "An In-Depth Look into the Win32 Portable Executable File Format" by * Matt Pietrek */ memset (&dir, 0, sizeof (dir)); memset (&dir_entry, 0, sizeof (dir_entry)); memset (&data_entry, 0, sizeof (data_entry)); g_assert (sizeof (dir) == 16); g_assert (sizeof (dir_entry) == 8); g_assert (sizeof (data_entry) == 16); node->offset = p - begin; /* IMAGE_RESOURCE_DIRECTORY */ res_id_entries = g_slist_length (node->children); dir.res_id_entries = GUINT16_TO_LE (res_id_entries); memcpy (p, &dir, sizeof (dir)); p += sizeof (dir); /* Reserve space for entries */ entries = p; p += sizeof (dir_entry) * res_id_entries; /* Write children */ for (l = node->children; l; l = l->next) { ResTreeNode *child = (ResTreeNode*)l->data; if (child->win32_res) { guint32 size; child->offset = p - begin; /* IMAGE_RESOURCE_DATA_ENTRY */ data_entry.rde_data_offset = GUINT32_TO_LE (p - begin + sizeof (data_entry)); size = mono_array_length (child->win32_res->res_data); data_entry.rde_size = GUINT32_TO_LE (size); memcpy (p, &data_entry, sizeof (data_entry)); p += sizeof (data_entry); memcpy (p, mono_array_addr (child->win32_res->res_data, char, 0), size); p += size; } else { resource_tree_encode (child, begin, p, &p); } } /* IMAGE_RESOURCE_ENTRY */ for (l = node->children; l; l = l->next) { ResTreeNode *child = (ResTreeNode*)l->data; MONO_PE_RES_DIR_ENTRY_SET_NAME (dir_entry, FALSE, child->id); MONO_PE_RES_DIR_ENTRY_SET_DIR (dir_entry, !child->win32_res, child->offset); memcpy (entries, &dir_entry, sizeof (dir_entry)); entries += sizeof (dir_entry); } *endbuf = p; } static void resource_tree_free (ResTreeNode * node) { GSList * list; for (list = node->children; list; list = list->next) resource_tree_free ((ResTreeNode*)list->data); g_slist_free(node->children); g_free (node); } static void assembly_add_win32_resources (MonoDynamicImage *assembly, MonoReflectionAssemblyBuilder *assemblyb) { char *buf; char *p; guint32 size, i; MonoReflectionWin32Resource *win32_res; ResTreeNode *tree; if (!assemblyb->win32_resources) return; /* * Resources are stored in a three level tree inside the PE file. * - level one contains a node for each type of resource * - level two contains a node for each resource * - level three contains a node for each instance of a resource for a * specific language. 
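 *
 * For example (the ids are purely illustrative), a VERSIONINFO resource
 * would be stored as: type node (RT_VERSION) -> resource node (id 1) ->
 * language node (0x0409) holding the actual data.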
*/ tree = resource_tree_create (assemblyb->win32_resources); /* Estimate the size of the encoded tree */ size = 0; for (i = 0; i < mono_array_length (assemblyb->win32_resources); ++i) { win32_res = (MonoReflectionWin32Resource*)mono_array_addr (assemblyb->win32_resources, MonoReflectionWin32Resource, i); size += mono_array_length (win32_res->res_data); } /* Directory structure */ size += mono_array_length (assemblyb->win32_resources) * 256; p = buf = g_malloc (size); resource_tree_encode (tree, p, p, &p); g_assert (p - buf <= size); assembly->win32_res = g_malloc (p - buf); assembly->win32_res_size = p - buf; memcpy (assembly->win32_res, buf, p - buf); g_free (buf); resource_tree_free (tree); } static void fixup_resource_directory (char *res_section, char *p, guint32 rva) { MonoPEResourceDir *dir = (MonoPEResourceDir*)p; int i; p += sizeof (MonoPEResourceDir); for (i = 0; i < GUINT16_FROM_LE (dir->res_named_entries) + GUINT16_FROM_LE (dir->res_id_entries); ++i) { MonoPEResourceDirEntry *dir_entry = (MonoPEResourceDirEntry*)p; char *child = res_section + MONO_PE_RES_DIR_ENTRY_DIR_OFFSET (*dir_entry); if (MONO_PE_RES_DIR_ENTRY_IS_DIR (*dir_entry)) { fixup_resource_directory (res_section, child, rva); } else { MonoPEResourceDataEntry *data_entry = (MonoPEResourceDataEntry*)child; data_entry->rde_data_offset = GUINT32_TO_LE (GUINT32_FROM_LE (data_entry->rde_data_offset) + rva); } p += sizeof (MonoPEResourceDirEntry); } } static void checked_write_file (HANDLE f, gconstpointer buffer, guint32 numbytes) { guint32 dummy; if (!WriteFile (f, buffer, numbytes, &dummy, NULL)) g_error ("WriteFile returned %d\n", GetLastError ()); } /* * mono_image_create_pefile: * @mb: a module builder object * * This function creates the PE-COFF header, the image sections, the CLI header * etc. all the data is written in * assembly->pefile where it can be easily retrieved later in chunks. 
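 *
 * The emitted file layout is: MS-DOS stub, MonoDotNetHeader, one
 * MonoSectionTable entry per section, then the section contents
 * (.text holding code, managed resources, metadata and the strong name,
 * followed by .rsrc and .reloc), each aligned to FILE_ALIGN.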
*/ void mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file) { MonoMSDOSHeader *msdos; MonoDotNetHeader *header; MonoSectionTable *section; MonoCLIHeader *cli_header; guint32 size, image_size, virtual_base, text_offset; guint32 header_start, section_start, file_offset, virtual_offset; MonoDynamicImage *assembly; MonoReflectionAssemblyBuilder *assemblyb; MonoDynamicStream pefile_stream = {0}; MonoDynamicStream *pefile = &pefile_stream; int i, nsections; guint32 *rva, value; guchar *p; static const unsigned char msheader[] = { 0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; assemblyb = mb->assemblyb; mono_image_basic_init (assemblyb); assembly = mb->dynamic_image; assembly->pe_kind = assemblyb->pe_kind; assembly->machine = assemblyb->machine; ((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->pe_kind = assemblyb->pe_kind; ((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->machine = assemblyb->machine; mono_image_build_metadata (mb); if (mb->is_main && assemblyb->resources) { int len = mono_array_length (assemblyb->resources); for (i = 0; i < len; ++i) assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (assemblyb->resources, MonoReflectionResource, i)); } if (mb->resources) { int len = mono_array_length (mb->resources); for (i = 0; i < len; ++i) assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (mb->resources, MonoReflectionResource, i)); } build_compressed_metadata (assembly); if (mb->is_main) assembly_add_win32_resources (assembly, assemblyb); nsections = calc_section_size (assembly); /* The DOS header and stub */ g_assert (sizeof (MonoMSDOSHeader) == sizeof (msheader)); mono_image_add_stream_data (pefile, (char*)msheader, sizeof (msheader)); /* the dotnet header */ header_start = mono_image_add_stream_zero (pefile, sizeof (MonoDotNetHeader)); /* the section tables */ section_start = mono_image_add_stream_zero (pefile, sizeof (MonoSectionTable) * nsections); file_offset = section_start + sizeof (MonoSectionTable) * nsections; virtual_offset = VIRT_ALIGN; image_size = 0; for (i = 0; i < MONO_SECTION_MAX; ++i) { if (!assembly->sections [i].size) continue; /* align offsets */ file_offset += FILE_ALIGN - 1; file_offset &= ~(FILE_ALIGN - 1); virtual_offset += VIRT_ALIGN - 1; virtual_offset &= ~(VIRT_ALIGN - 1); assembly->sections [i].offset = file_offset; assembly->sections [i].rva = virtual_offset; file_offset += assembly->sections [i].size; virtual_offset += assembly->sections [i].size; image_size += (assembly->sections [i].size + VIRT_ALIGN - 1) & ~(VIRT_ALIGN - 1); } file_offset += FILE_ALIGN - 1; file_offset &= ~(FILE_ALIGN - 1); image_size += section_start + sizeof (MonoSectionTable) * nsections; /* back-patch info */ msdos = (MonoMSDOSHeader*)pefile->data; msdos->pe_offset = 
GUINT32_FROM_LE (sizeof (MonoMSDOSHeader)); header = (MonoDotNetHeader*)(pefile->data + header_start); header->pesig [0] = 'P'; header->pesig [1] = 'E'; header->coff.coff_machine = GUINT16_FROM_LE (assemblyb->machine); header->coff.coff_sections = GUINT16_FROM_LE (nsections); header->coff.coff_time = GUINT32_FROM_LE (time (NULL)); header->coff.coff_opt_header_size = GUINT16_FROM_LE (sizeof (MonoDotNetHeader) - sizeof (MonoCOFFHeader) - 4); if (assemblyb->pekind == 1) { /* it's a dll */ header->coff.coff_attributes = GUINT16_FROM_LE (0x210e); } else { /* it's an exe */ header->coff.coff_attributes = GUINT16_FROM_LE (0x010e); } virtual_base = 0x400000; /* FIXME: 0x10000000 if a DLL */ header->pe.pe_magic = GUINT16_FROM_LE (0x10B); header->pe.pe_major = 6; header->pe.pe_minor = 0; size = assembly->sections [MONO_SECTION_TEXT].size; size += FILE_ALIGN - 1; size &= ~(FILE_ALIGN - 1); header->pe.pe_code_size = GUINT32_FROM_LE(size); size = assembly->sections [MONO_SECTION_RSRC].size; size += FILE_ALIGN - 1; size &= ~(FILE_ALIGN - 1); header->pe.pe_data_size = GUINT32_FROM_LE(size); g_assert (START_TEXT_RVA == assembly->sections [MONO_SECTION_TEXT].rva); header->pe.pe_rva_code_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva); header->pe.pe_rva_data_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva); /* pe_rva_entry_point always at the beginning of the text section */ header->pe.pe_rva_entry_point = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva); header->nt.pe_image_base = GUINT32_FROM_LE (virtual_base); header->nt.pe_section_align = GUINT32_FROM_LE (VIRT_ALIGN); header->nt.pe_file_alignment = GUINT32_FROM_LE (FILE_ALIGN); header->nt.pe_os_major = GUINT16_FROM_LE (4); header->nt.pe_os_minor = GUINT16_FROM_LE (0); header->nt.pe_subsys_major = GUINT16_FROM_LE (4); size = section_start; size += FILE_ALIGN - 1; size &= ~(FILE_ALIGN - 1); header->nt.pe_header_size = GUINT32_FROM_LE (size); size = image_size; size += VIRT_ALIGN - 1; size &= ~(VIRT_ALIGN - 1); header->nt.pe_image_size = GUINT32_FROM_LE (size); /* // Translate the PEFileKind value to the value expected by the Windows loader */ { short kind; /* // PEFileKinds.Dll == 1 // PEFileKinds.ConsoleApplication == 2 // PEFileKinds.WindowApplication == 3 // // need to get: // IMAGE_SUBSYSTEM_WINDOWS_GUI 2 // Image runs in the Windows GUI subsystem. // IMAGE_SUBSYSTEM_WINDOWS_CUI 3 // Image runs in the Windows character subsystem. 
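//
// In other words, only PEFileKinds.WindowApplication (3) maps to the GUI
// subsystem (2); Dll and ConsoleApplication fall back to the character
// subsystem (3), which is exactly what the code below does.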
*/ if (assemblyb->pekind == 3) kind = 2; else kind = 3; header->nt.pe_subsys_required = GUINT16_FROM_LE (kind); } header->nt.pe_stack_reserve = GUINT32_FROM_LE (0x00100000); header->nt.pe_stack_commit = GUINT32_FROM_LE (0x00001000); header->nt.pe_heap_reserve = GUINT32_FROM_LE (0x00100000); header->nt.pe_heap_commit = GUINT32_FROM_LE (0x00001000); header->nt.pe_loader_flags = GUINT32_FROM_LE (0); header->nt.pe_data_dir_count = GUINT32_FROM_LE (16); /* fill data directory entries */ header->datadir.pe_resource_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].size); header->datadir.pe_resource_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva); header->datadir.pe_reloc_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].size); header->datadir.pe_reloc_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].rva); header->datadir.pe_cli_header.size = GUINT32_FROM_LE (72); header->datadir.pe_cli_header.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->cli_header_offset); header->datadir.pe_iat.size = GUINT32_FROM_LE (8); header->datadir.pe_iat.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset); /* patch entrypoint name */ if (assemblyb->pekind == 1) memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorDllMain", 12); else memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorExeMain", 12); /* patch imported function RVA name */ rva = (guint32*)(assembly->code.data + assembly->iat_offset); *rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset); /* the import table */ header->datadir.pe_import_table.size = GUINT32_FROM_LE (79); /* FIXME: magic number? */ header->datadir.pe_import_table.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->idt_offset); /* patch imported dll RVA name and other entries in the dir */ rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, name_rva)); *rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset + 14); /* 14 is hint+strlen+1 of func name */ rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_address_table_rva)); *rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset); rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_lookup_table)); *rva = GUINT32_FROM_LE (assembly->text_rva + assembly->ilt_offset); p = (guchar*)(assembly->code.data + assembly->ilt_offset); value = (assembly->text_rva + assembly->imp_names_offset); *p++ = (value) & 0xff; *p++ = (value >> 8) & (0xff); *p++ = (value >> 16) & (0xff); *p++ = (value >> 24) & (0xff); /* the CLI header info */ cli_header = (MonoCLIHeader*)(assembly->code.data + assembly->cli_header_offset); cli_header->ch_size = GUINT32_FROM_LE (72); cli_header->ch_runtime_major = GUINT16_FROM_LE (2); if (mono_framework_version () > 1) cli_header->ch_runtime_minor = GUINT16_FROM_LE (5); else cli_header->ch_runtime_minor = GUINT16_FROM_LE (0); cli_header->ch_flags = GUINT32_FROM_LE (assemblyb->pe_kind); if (assemblyb->entry_point) { guint32 table_idx = 0; if (!strcmp (assemblyb->entry_point->object.vtable->klass->name, "MethodBuilder")) { MonoReflectionMethodBuilder *methodb = (MonoReflectionMethodBuilder*)assemblyb->entry_point; table_idx = methodb->table_idx; } else { table_idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, assemblyb->entry_point->method)); } cli_header->ch_entry_point = GUINT32_FROM_LE (table_idx | 
MONO_TOKEN_METHOD_DEF); } else { cli_header->ch_entry_point = GUINT32_FROM_LE (0); } /* The embedded managed resources */ text_offset = assembly->text_rva + assembly->code.index; cli_header->ch_resources.rva = GUINT32_FROM_LE (text_offset); cli_header->ch_resources.size = GUINT32_FROM_LE (assembly->resources.index); text_offset += assembly->resources.index; cli_header->ch_metadata.rva = GUINT32_FROM_LE (text_offset); cli_header->ch_metadata.size = GUINT32_FROM_LE (assembly->meta_size); text_offset += assembly->meta_size; if (assembly->strong_name_size) { cli_header->ch_strong_name.rva = GUINT32_FROM_LE (text_offset); cli_header->ch_strong_name.size = GUINT32_FROM_LE (assembly->strong_name_size); text_offset += assembly->strong_name_size; } /* write the section tables and section content */ section = (MonoSectionTable*)(pefile->data + section_start); for (i = 0; i < MONO_SECTION_MAX; ++i) { static const char section_names [][7] = { ".text", ".rsrc", ".reloc" }; if (!assembly->sections [i].size) continue; strcpy (section->st_name, section_names [i]); /*g_print ("output section %s (%d), size: %d\n", section->st_name, i, assembly->sections [i].size);*/ section->st_virtual_address = GUINT32_FROM_LE (assembly->sections [i].rva); section->st_virtual_size = GUINT32_FROM_LE (assembly->sections [i].size); section->st_raw_data_size = GUINT32_FROM_LE (GUINT32_TO_LE (section->st_virtual_size) + (FILE_ALIGN - 1)); section->st_raw_data_size &= GUINT32_FROM_LE (~(FILE_ALIGN - 1)); section->st_raw_data_ptr = GUINT32_FROM_LE (assembly->sections [i].offset); section->st_flags = GUINT32_FROM_LE (assembly->sections [i].attrs); section ++; } checked_write_file (file, pefile->data, pefile->index); mono_dynamic_stream_reset (pefile); for (i = 0; i < MONO_SECTION_MAX; ++i) { if (!assembly->sections [i].size) continue; if (SetFilePointer (file, assembly->sections [i].offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER) g_error ("SetFilePointer returned %d\n", GetLastError ()); switch (i) { case MONO_SECTION_TEXT: /* patch entry point */ p = (guchar*)(assembly->code.data + 2); value = (virtual_base + assembly->text_rva + assembly->iat_offset); *p++ = (value) & 0xff; *p++ = (value >> 8) & 0xff; *p++ = (value >> 16) & 0xff; *p++ = (value >> 24) & 0xff; checked_write_file (file, assembly->code.data, assembly->code.index); checked_write_file (file, assembly->resources.data, assembly->resources.index); checked_write_file (file, assembly->image.raw_metadata, assembly->meta_size); checked_write_file (file, assembly->strong_name, assembly->strong_name_size); g_free (assembly->image.raw_metadata); break; case MONO_SECTION_RELOC: { struct { guint32 page_rva; guint32 block_size; guint16 type_and_offset; guint16 term; } reloc; g_assert (sizeof (reloc) == 12); reloc.page_rva = GUINT32_FROM_LE (assembly->text_rva); reloc.block_size = GUINT32_FROM_LE (12); /* * the entrypoint is always at the start of the text section * 3 is IMAGE_REL_BASED_HIGHLOW * 2 is patch_size_rva - text_rva */ reloc.type_and_offset = GUINT16_FROM_LE ((3 << 12) + (2)); reloc.term = 0; checked_write_file (file, &reloc, sizeof (reloc)); break; } case MONO_SECTION_RSRC: if (assembly->win32_res) { /* Fixup the offsets in the IMAGE_RESOURCE_DATA_ENTRY structures */ fixup_resource_directory (assembly->win32_res, assembly->win32_res, assembly->sections [i].rva); checked_write_file (file, assembly->win32_res, assembly->win32_res_size); } break; default: g_assert_not_reached (); } } /* check that the file is properly padded */ if (SetFilePointer (file, file_offset, 
NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER) g_error ("SetFilePointer returned %d\n", GetLastError ()); if (! SetEndOfFile (file)) g_error ("SetEndOfFile returned %d\n", GetLastError ()); mono_dynamic_stream_reset (&assembly->code); mono_dynamic_stream_reset (&assembly->us); mono_dynamic_stream_reset (&assembly->blob); mono_dynamic_stream_reset (&assembly->guid); mono_dynamic_stream_reset (&assembly->sheap); g_hash_table_foreach (assembly->blob_cache, (GHFunc)g_free, NULL); g_hash_table_destroy (assembly->blob_cache); assembly->blob_cache = NULL; } #else /* DISABLE_REFLECTION_EMIT_SAVE */ void mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file) { g_assert_not_reached (); } #endif /* DISABLE_REFLECTION_EMIT_SAVE */ #ifndef DISABLE_REFLECTION_EMIT MonoReflectionModule * mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName) { char *name; MonoImage *image; MonoImageOpenStatus status; MonoDynamicAssembly *assembly; guint32 module_count; MonoImage **new_modules; gboolean *new_modules_loaded; name = mono_string_to_utf8 (fileName); image = mono_image_open (name, &status); if (!image) { MonoException *exc; if (status == MONO_IMAGE_ERROR_ERRNO) exc = mono_get_exception_file_not_found (fileName); else exc = mono_get_exception_bad_image_format (name); g_free (name); mono_raise_exception (exc); } g_free (name); assembly = ab->dynamic_assembly; image->assembly = (MonoAssembly*)assembly; module_count = image->assembly->image->module_count; new_modules = g_new0 (MonoImage *, module_count + 1); new_modules_loaded = g_new0 (gboolean, module_count + 1); if (image->assembly->image->modules) memcpy (new_modules, image->assembly->image->modules, module_count * sizeof (MonoImage *)); if (image->assembly->image->modules_loaded) memcpy (new_modules_loaded, image->assembly->image->modules_loaded, module_count * sizeof (gboolean)); new_modules [module_count] = image; new_modules_loaded [module_count] = TRUE; mono_image_addref (image); g_free (image->assembly->image->modules); image->assembly->image->modules = new_modules; image->assembly->image->modules_loaded = new_modules_loaded; image->assembly->image->module_count ++; mono_assembly_load_references (image, &status); if (status) { mono_image_close (image); mono_raise_exception (mono_get_exception_file_not_found (fileName)); } return mono_module_get_object (mono_domain_get (), image); } #endif /* DISABLE_REFLECTION_EMIT */ /* * We need to return always the same object for MethodInfo, FieldInfo etc.. * but we need to consider the reflected type. * type uses a different hash, since it uses custom hash/equal functions. 
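 *
 * The cache key is therefore the (item, refclass) pair below: the same
 * MonoMethod reflected through a base class and through a derived class
 * must yield two distinct managed objects, so it is stored under two
 * different ReflectedEntry keys.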
*/ typedef struct { gpointer item; MonoClass *refclass; } ReflectedEntry; static gboolean reflected_equal (gconstpointer a, gconstpointer b) { const ReflectedEntry *ea = a; const ReflectedEntry *eb = b; return (ea->item == eb->item) && (ea->refclass == eb->refclass); } static guint reflected_hash (gconstpointer a) { const ReflectedEntry *ea = a; return mono_aligned_addr_hash (ea->item); } #define CHECK_OBJECT(t,p,k) \ do { \ t _obj; \ ReflectedEntry e; \ e.item = (p); \ e.refclass = (k); \ mono_domain_lock (domain); \ if (!domain->refobject_hash) \ domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \ if ((_obj = mono_g_hash_table_lookup (domain->refobject_hash, &e))) { \ mono_domain_unlock (domain); \ return _obj; \ } \ mono_domain_unlock (domain); \ } while (0) #ifdef HAVE_BOEHM_GC /* ReflectedEntry doesn't need to be GC tracked */ #define ALLOC_REFENTRY g_new0 (ReflectedEntry, 1) #define FREE_REFENTRY(entry) g_free ((entry)) #define REFENTRY_REQUIRES_CLEANUP #else #define ALLOC_REFENTRY mono_mempool_alloc (domain->mp, sizeof (ReflectedEntry)) /* FIXME: */ #define FREE_REFENTRY(entry) #endif #define CACHE_OBJECT(t,p,o,k) \ do { \ t _obj; \ ReflectedEntry pe; \ pe.item = (p); \ pe.refclass = (k); \ mono_domain_lock (domain); \ if (!domain->refobject_hash) \ domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \ _obj = mono_g_hash_table_lookup (domain->refobject_hash, &pe); \ if (!_obj) { \ ReflectedEntry *e = ALLOC_REFENTRY; \ e->item = (p); \ e->refclass = (k); \ mono_g_hash_table_insert (domain->refobject_hash, e,o); \ _obj = o; \ } \ mono_domain_unlock (domain); \ return _obj; \ } while (0) static void clear_cached_object (MonoDomain *domain, gpointer o, MonoClass *klass) { mono_domain_lock (domain); if (domain->refobject_hash) { ReflectedEntry pe; gpointer orig_pe, orig_value; pe.item = o; pe.refclass = klass; if (mono_g_hash_table_lookup_extended (domain->refobject_hash, &pe, &orig_pe, &orig_value)) { mono_g_hash_table_remove (domain->refobject_hash, &pe); FREE_REFENTRY (orig_pe); } } mono_domain_unlock (domain); } #ifdef REFENTRY_REQUIRES_CLEANUP static void cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data) { FREE_REFENTRY (key); } #endif void mono_reflection_cleanup_domain (MonoDomain *domain) { if (domain->refobject_hash) { /*let's avoid scanning the whole hashtable if not needed*/ #ifdef REFENTRY_REQUIRES_CLEANUP mono_g_hash_table_foreach (domain->refobject_hash, cleanup_refobject_hash, NULL); #endif mono_g_hash_table_destroy (domain->refobject_hash); domain->refobject_hash = NULL; } } #ifndef DISABLE_REFLECTION_EMIT static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly) { CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL); } static gpointer register_module (MonoDomain *domain, MonoReflectionModuleBuilder *res, MonoDynamicImage *module) { CACHE_OBJECT (MonoReflectionModuleBuilder *, module, res, NULL); } void mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb) { MonoDynamicImage *image = moduleb->dynamic_image; MonoReflectionAssemblyBuilder *ab = moduleb->assemblyb; if (!image) { MonoError error; int module_count; MonoImage **new_modules; MonoImage *ass; char *name, *fqname; /* * FIXME: we already created an image in mono_image_basic_init (), but * we don't know which module it belongs to, since that is only * determined at assembly save time. 
*/ /*image = (MonoDynamicImage*)ab->dynamic_assembly->assembly.image; */ name = mono_string_to_utf8 (ab->name); fqname = mono_string_to_utf8_checked (moduleb->module.fqname, &error); if (!mono_error_ok (&error)) { g_free (name); mono_error_raise_exception (&error); } image = create_dynamic_mono_image (ab->dynamic_assembly, name, fqname); moduleb->module.image = &image->image; moduleb->dynamic_image = image; register_module (mono_object_domain (moduleb), moduleb, image); /* register the module with the assembly */ ass = ab->dynamic_assembly->assembly.image; module_count = ass->module_count; new_modules = g_new0 (MonoImage *, module_count + 1); if (ass->modules) memcpy (new_modules, ass->modules, module_count * sizeof (MonoImage *)); new_modules [module_count] = &image->image; mono_image_addref (&image->image); g_free (ass->modules); ass->modules = new_modules; ass->module_count ++; } } void mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type) { MonoDynamicImage *image = moduleb->dynamic_image; g_assert (type->type); image->wrappers_type = mono_class_from_mono_type (type->type); } #endif /* * mono_assembly_get_object: * @domain: an app domain * @assembly: an assembly * * Return an System.Reflection.Assembly object representing the MonoAssembly @assembly. */ MonoReflectionAssembly* mono_assembly_get_object (MonoDomain *domain, MonoAssembly *assembly) { static MonoClass *System_Reflection_Assembly; MonoReflectionAssembly *res; CHECK_OBJECT (MonoReflectionAssembly *, assembly, NULL); if (!System_Reflection_Assembly) System_Reflection_Assembly = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "Assembly"); res = (MonoReflectionAssembly *)mono_object_new (domain, System_Reflection_Assembly); res->assembly = assembly; CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL); } MonoReflectionModule* mono_module_get_object (MonoDomain *domain, MonoImage *image) { static MonoClass *System_Reflection_Module; MonoReflectionModule *res; char* basename; CHECK_OBJECT (MonoReflectionModule *, image, NULL); if (!System_Reflection_Module) System_Reflection_Module = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "Module"); res = (MonoReflectionModule *)mono_object_new (domain, System_Reflection_Module); res->image = image; MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly)); MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, image->name)); basename = g_path_get_basename (image->name); MONO_OBJECT_SETREF (res, name, mono_string_new (domain, basename)); MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, image->module_name)); g_free (basename); if (image->assembly->image == image) { res->token = mono_metadata_make_token (MONO_TABLE_MODULE, 1); } else { int i; res->token = 0; if (image->assembly->image->modules) { for (i = 0; i < image->assembly->image->module_count; i++) { if (image->assembly->image->modules [i] == image) res->token = mono_metadata_make_token (MONO_TABLE_MODULEREF, i + 1); } g_assert (res->token); } } CACHE_OBJECT (MonoReflectionModule *, image, res, NULL); } MonoReflectionModule* mono_module_file_get_object (MonoDomain *domain, MonoImage *image, int table_index) { static MonoClass *System_Reflection_Module; MonoReflectionModule *res; MonoTableInfo *table; guint32 cols [MONO_FILE_SIZE]; const char *name; guint32 i, name_idx; const char *val; if (!System_Reflection_Module) System_Reflection_Module = mono_class_from_name ( 
mono_defaults.corlib, "System.Reflection", "Module"); res = (MonoReflectionModule *)mono_object_new (domain, System_Reflection_Module); table = &image->tables [MONO_TABLE_FILE]; g_assert (table_index < table->rows); mono_metadata_decode_row (table, table_index, cols, MONO_FILE_SIZE); res->image = NULL; MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly)); name = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]); /* Check whenever the row has a corresponding row in the moduleref table */ table = &image->tables [MONO_TABLE_MODULEREF]; for (i = 0; i < table->rows; ++i) { name_idx = mono_metadata_decode_row_col (table, i, MONO_MODULEREF_NAME); val = mono_metadata_string_heap (image, name_idx); if (strcmp (val, name) == 0) res->image = image->modules [i]; } MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, name)); MONO_OBJECT_SETREF (res, name, mono_string_new (domain, name)); MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, name)); res->is_resource = cols [MONO_FILE_FLAGS] && FILE_CONTAINS_NO_METADATA; res->token = mono_metadata_make_token (MONO_TABLE_FILE, table_index + 1); return res; } static gboolean mymono_metadata_type_equal (MonoType *t1, MonoType *t2) { if ((t1->type != t2->type) || (t1->byref != t2->byref)) return FALSE; switch (t1->type) { case MONO_TYPE_VOID: case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R4: case MONO_TYPE_R8: case MONO_TYPE_STRING: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_TYPEDBYREF: return TRUE; case MONO_TYPE_VALUETYPE: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: return t1->data.klass == t2->data.klass; case MONO_TYPE_PTR: return mymono_metadata_type_equal (t1->data.type, t2->data.type); case MONO_TYPE_ARRAY: if (t1->data.array->rank != t2->data.array->rank) return FALSE; return t1->data.array->eklass == t2->data.array->eklass; case MONO_TYPE_GENERICINST: { int i; MonoGenericInst *i1 = t1->data.generic_class->context.class_inst; MonoGenericInst *i2 = t2->data.generic_class->context.class_inst; if (i1->type_argc != i2->type_argc) return FALSE; if (!mono_metadata_type_equal (&t1->data.generic_class->container_class->byval_arg, &t2->data.generic_class->container_class->byval_arg)) return FALSE; /* FIXME: we should probably just compare the instance pointers directly. 
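 * (That would only be safe if MonoGenericInst values are guaranteed to be
 * canonical; since that is not verified here, the element-wise comparison
 * below is kept.)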
*/ for (i = 0; i < i1->type_argc; ++i) { if (!mono_metadata_type_equal (i1->type_argv [i], i2->type_argv [i])) return FALSE; } return TRUE; } case MONO_TYPE_VAR: case MONO_TYPE_MVAR: return t1->data.generic_param == t2->data.generic_param; default: g_error ("implement type compare for %0x!", t1->type); return FALSE; } return FALSE; } static guint mymono_metadata_type_hash (MonoType *t1) { guint hash; hash = t1->type; hash |= t1->byref << 6; /* do not collide with t1->type values */ switch (t1->type) { case MONO_TYPE_VALUETYPE: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: /* check if the distribution is good enough */ return ((hash << 5) - hash) ^ g_str_hash (t1->data.klass->name); case MONO_TYPE_PTR: return ((hash << 5) - hash) ^ mymono_metadata_type_hash (t1->data.type); case MONO_TYPE_GENERICINST: { int i; MonoGenericInst *inst = t1->data.generic_class->context.class_inst; hash += g_str_hash (t1->data.generic_class->container_class->name); hash *= 13; for (i = 0; i < inst->type_argc; ++i) { hash += mymono_metadata_type_hash (inst->type_argv [i]); hash *= 13; } return hash; } } return hash; } static MonoReflectionGenericClass* mono_generic_class_get_object (MonoDomain *domain, MonoType *geninst) { static MonoClass *System_Reflection_MonoGenericClass; MonoReflectionGenericClass *res; MonoClass *klass, *gklass; MonoGenericInst *ginst; MonoArray *type_args; int i; if (!System_Reflection_MonoGenericClass) { System_Reflection_MonoGenericClass = mono_class_from_name ( mono_defaults.corlib, "System.Reflection", "MonoGenericClass"); g_assert (System_Reflection_MonoGenericClass); } klass = mono_class_from_mono_type (geninst); gklass = klass->generic_class->container_class; mono_class_init (klass); #ifdef HAVE_SGEN_GC res = (MonoReflectionGenericClass *) mono_gc_alloc_pinned_obj (mono_class_vtable (domain, System_Reflection_MonoGenericClass), mono_class_instance_size (System_Reflection_MonoGenericClass)); #else res = (MonoReflectionGenericClass *) mono_object_new (domain, System_Reflection_MonoGenericClass); #endif res->type.type = geninst; g_assert (gklass->reflection_info); g_assert (!strcmp (((MonoObject*)gklass->reflection_info)->vtable->klass->name, "TypeBuilder")); MONO_OBJECT_SETREF (res, generic_type, gklass->reflection_info); ginst = klass->generic_class->context.class_inst; type_args = mono_array_new (domain, mono_defaults.systemtype_class, ginst->type_argc); for (i = 0; i < ginst->type_argc; ++i) mono_array_setref (type_args, i, mono_type_get_object (domain, ginst->type_argv [i])); MONO_OBJECT_SETREF (res, type_arguments, type_args); return res; } static gboolean verify_safe_for_managed_space (MonoType *type) { switch (type->type) { #ifdef DEBUG_HARDER case MONO_TYPE_ARRAY: return verify_safe_for_managed_space (&type->data.array->eklass->byval_arg); case MONO_TYPE_PTR: return verify_safe_for_managed_space (type->data.type); case MONO_TYPE_SZARRAY: return verify_safe_for_managed_space (&type->data.klass->byval_arg); case MONO_TYPE_GENERICINST: { MonoGenericInst *inst = type->data.generic_class->inst; int i; if (!inst->is_open) break; for (i = 0; i < inst->type_argc; ++i) if (!verify_safe_for_managed_space (inst->type_argv [i])) return FALSE; break; } #endif case MONO_TYPE_VAR: case MONO_TYPE_MVAR: return TRUE; } return TRUE; } /* * mono_type_get_object: * @domain: an app domain * @type: a type * * Return an System.MonoType object representing the type @type. 
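 *
 * Illustrative use (klass is any MonoClass the caller already holds):
 *
 *   MonoReflectionType *rt = mono_type_get_object (domain, &klass->byval_arg);
 *
 * The result is cached in domain->type_hash, so repeated calls with the
 * same MonoType return the same managed object.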
*/ MonoReflectionType* mono_type_get_object (MonoDomain *domain, MonoType *type) { MonoReflectionType *res; MonoClass *klass = mono_class_from_mono_type (type); /*we must avoid using @type as it might have come * from a mono_metadata_type_dup and the caller * expects that is can be freed. * Using the right type from */ type = klass->byval_arg.byref == type->byref ? &klass->byval_arg : &klass->this_arg; /* void is very common */ if (type->type == MONO_TYPE_VOID && domain->typeof_void) return (MonoReflectionType*)domain->typeof_void; /* * If the vtable of the given class was already created, we can use * the MonoType from there and avoid all locking and hash table lookups. * * We cannot do this for TypeBuilders as mono_reflection_create_runtime_class expects * that the resulting object is different. */ if (type == &klass->byval_arg && !klass->image->dynamic) { MonoVTable *vtable = mono_class_try_get_vtable (domain, klass); if (vtable && vtable->type) return vtable->type; } mono_loader_lock (); /*FIXME mono_class_init and mono_class_vtable acquire it*/ mono_domain_lock (domain); if (!domain->type_hash) domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mymono_metadata_type_hash, (GCompareFunc)mymono_metadata_type_equal, MONO_HASH_VALUE_GC); if ((res = mono_g_hash_table_lookup (domain->type_hash, type))) { mono_domain_unlock (domain); mono_loader_unlock (); return res; } /* Create a MonoGenericClass object for instantiations of not finished TypeBuilders */ if ((type->type == MONO_TYPE_GENERICINST) && type->data.generic_class->is_dynamic && !type->data.generic_class->container_class->wastypebuilder) { res = (MonoReflectionType *)mono_generic_class_get_object (domain, type); mono_g_hash_table_insert (domain->type_hash, type, res); mono_domain_unlock (domain); mono_loader_unlock (); return res; } if (!verify_safe_for_managed_space (type)) { mono_domain_unlock (domain); mono_loader_unlock (); mono_raise_exception (mono_get_exception_invalid_operation ("This type cannot be propagated to managed space")); } if (klass->reflection_info && !klass->wastypebuilder) { gboolean is_type_done = TRUE; /* Generic parameters have reflection_info set but they are not finished together with their enclosing type. * We must ensure that once a type is finished we don't return a GenericTypeParameterBuilder. * We can't simply close the types as this will interfere with other parts of the generics machinery. */ if (klass->byval_arg.type == MONO_TYPE_MVAR || klass->byval_arg.type == MONO_TYPE_VAR) { MonoGenericParam *gparam = klass->byval_arg.data.generic_param; if (gparam->owner && gparam->owner->is_method) { MonoMethod *method = gparam->owner->owner.method; if (method && mono_class_get_generic_type_definition (method->klass)->wastypebuilder) is_type_done = FALSE; } else if (gparam->owner && !gparam->owner->is_method) { MonoClass *klass = gparam->owner->owner.klass; if (klass && mono_class_get_generic_type_definition (klass)->wastypebuilder) is_type_done = FALSE; } } /* g_assert_not_reached (); */ /* should this be considered an error condition? 
*/ if (is_type_done && !type->byref) { mono_domain_unlock (domain); mono_loader_unlock (); return klass->reflection_info; } } // FIXME: Get rid of this, do it in the icalls for Type mono_class_init (klass); #ifdef HAVE_SGEN_GC res = (MonoReflectionType *)mono_gc_alloc_pinned_obj (mono_class_vtable (domain, mono_defaults.monotype_class), mono_class_instance_size (mono_defaults.monotype_class)); #else res = (MonoReflectionType *)mono_object_new (domain, mono_defaults.monotype_class); #endif res->type = type; mono_g_hash_table_insert (domain->type_hash, type, res); if (type->type == MONO_TYPE_VOID) domain->typeof_void = (MonoObject*)res; mono_domain_unlock (domain); mono_loader_unlock (); return res; } /* * mono_method_get_object: * @domain: an app domain * @method: a method * @refclass: the reflected type (can be NULL) * * Return an System.Reflection.MonoMethod object representing the method @method. */ MonoReflectionMethod* mono_method_get_object (MonoDomain *domain, MonoMethod *method, MonoClass *refclass) { /* * We use the same C representation for methods and constructors, but the type * name in C# is different. */ static MonoClass *System_Reflection_MonoMethod = NULL; static MonoClass *System_Reflection_MonoCMethod = NULL; static MonoClass *System_Reflection_MonoGenericMethod = NULL; static MonoClass *System_Reflection_MonoGenericCMethod = NULL; MonoClass *klass; MonoReflectionMethod *ret; if (method->is_inflated) { MonoReflectionGenericMethod *gret; refclass = method->klass; CHECK_OBJECT (MonoReflectionMethod *, method, refclass); if ((*method->name == '.') && (!strcmp (method->name, ".ctor") || !strcmp (method->name, ".cctor"))) { if (!System_Reflection_MonoGenericCMethod) System_Reflection_MonoGenericCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericCMethod"); klass = System_Reflection_MonoGenericCMethod; } else { if (!System_Reflection_MonoGenericMethod) System_Reflection_MonoGenericMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericMethod"); klass = System_Reflection_MonoGenericMethod; } gret = (MonoReflectionGenericMethod*)mono_object_new (domain, klass); gret->method.method = method; MONO_OBJECT_SETREF (gret, method.name, mono_string_new (domain, method->name)); MONO_OBJECT_SETREF (gret, method.reftype, mono_type_get_object (domain, &refclass->byval_arg)); CACHE_OBJECT (MonoReflectionMethod *, method, (MonoReflectionMethod*)gret, refclass); } if (!refclass) refclass = method->klass; CHECK_OBJECT (MonoReflectionMethod *, method, refclass); if (*method->name == '.' && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0)) { if (!System_Reflection_MonoCMethod) System_Reflection_MonoCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoCMethod"); klass = System_Reflection_MonoCMethod; } else { if (!System_Reflection_MonoMethod) System_Reflection_MonoMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoMethod"); klass = System_Reflection_MonoMethod; } ret = (MonoReflectionMethod*)mono_object_new (domain, klass); ret->method = method; MONO_OBJECT_SETREF (ret, reftype, mono_type_get_object (domain, &refclass->byval_arg)); CACHE_OBJECT (MonoReflectionMethod *, method, ret, refclass); } /* * mono_method_clear_object: * * Clear the cached reflection objects for the dynamic method METHOD. 
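 *
 * Both the method itself and the address of its signature (the key used
 * by mono_param_get_objects ()) are removed, walking up the parent chain
 * of method->klass so entries cached against base classes are cleared too.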
*/ void mono_method_clear_object (MonoDomain *domain, MonoMethod *method) { MonoClass *klass; g_assert (method->dynamic); klass = method->klass; while (klass) { clear_cached_object (domain, method, klass); klass = klass->parent; } /* Added by mono_param_get_objects () */ clear_cached_object (domain, &(method->signature), NULL); klass = method->klass; while (klass) { clear_cached_object (domain, &(method->signature), klass); klass = klass->parent; } } /* * mono_field_get_object: * @domain: an app domain * @klass: a type * @field: a field * * Return an System.Reflection.MonoField object representing the field @field * in class @klass. */ MonoReflectionField* mono_field_get_object (MonoDomain *domain, MonoClass *klass, MonoClassField *field) { MonoReflectionField *res; static MonoClass *monofield_klass; CHECK_OBJECT (MonoReflectionField *, field, klass); if (!monofield_klass) monofield_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoField"); res = (MonoReflectionField *)mono_object_new (domain, monofield_klass); res->klass = klass; res->field = field; MONO_OBJECT_SETREF (res, name, mono_string_new (domain, mono_field_get_name (field))); if (is_field_on_inst (field)) res->attrs = get_field_on_inst_generic_type (field)->attrs; else res->attrs = field->type->attrs; MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type)); CACHE_OBJECT (MonoReflectionField *, field, res, klass); } /* * mono_property_get_object: * @domain: an app domain * @klass: a type * @property: a property * * Return an System.Reflection.MonoProperty object representing the property @property * in class @klass. */ MonoReflectionProperty* mono_property_get_object (MonoDomain *domain, MonoClass *klass, MonoProperty *property) { MonoReflectionProperty *res; static MonoClass *monoproperty_klass; CHECK_OBJECT (MonoReflectionProperty *, property, klass); if (!monoproperty_klass) monoproperty_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoProperty"); res = (MonoReflectionProperty *)mono_object_new (domain, monoproperty_klass); res->klass = klass; res->property = property; CACHE_OBJECT (MonoReflectionProperty *, property, res, klass); } /* * mono_event_get_object: * @domain: an app domain * @klass: a type * @event: a event * * Return an System.Reflection.MonoEvent object representing the event @event * in class @klass. */ MonoReflectionEvent* mono_event_get_object (MonoDomain *domain, MonoClass *klass, MonoEvent *event) { MonoReflectionEvent *res; MonoReflectionMonoEvent *mono_event; static MonoClass *monoevent_klass; CHECK_OBJECT (MonoReflectionEvent *, event, klass); if (!monoevent_klass) monoevent_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoEvent"); mono_event = (MonoReflectionMonoEvent *)mono_object_new (domain, monoevent_klass); mono_event->klass = klass; mono_event->event = event; res = (MonoReflectionEvent*)mono_event; CACHE_OBJECT (MonoReflectionEvent *, event, res, klass); } /** * mono_get_reflection_missing_object: * @domain: Domain where the object lives * * Returns the System.Reflection.Missing.Value singleton object * (of type System.Reflection.Missing). 
* * Used as the value for ParameterInfo.DefaultValue when Optional * is present */ static MonoObject * mono_get_reflection_missing_object (MonoDomain *domain) { MonoObject *obj; static MonoClassField *missing_value_field = NULL; if (!missing_value_field) { MonoClass *missing_klass; missing_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Missing"); mono_class_init (missing_klass); missing_value_field = mono_class_get_field_from_name (missing_klass, "Value"); g_assert (missing_value_field); } obj = mono_field_get_value_object (domain, missing_value_field, NULL); g_assert (obj); return obj; } static MonoObject* get_dbnull (MonoDomain *domain, MonoObject **dbnull) { if (!*dbnull) *dbnull = mono_get_dbnull_object (domain); return *dbnull; } static MonoObject* get_reflection_missing (MonoDomain *domain, MonoObject **reflection_missing) { if (!*reflection_missing) *reflection_missing = mono_get_reflection_missing_object (domain); return *reflection_missing; } /* * mono_param_get_objects: * @domain: an app domain * @method: a method * * Return an System.Reflection.ParameterInfo array object representing the parameters * in the method @method. */ MonoArray* mono_param_get_objects_internal (MonoDomain *domain, MonoMethod *method, MonoClass *refclass) { static MonoClass *System_Reflection_ParameterInfo; static MonoClass *System_Reflection_ParameterInfo_array; MonoArray *res = NULL; MonoReflectionMethod *member = NULL; MonoReflectionParameter *param = NULL; char **names, **blobs = NULL; guint32 *types = NULL; MonoType *type = NULL; MonoObject *dbnull = NULL; MonoObject *missing = NULL; MonoMarshalSpec **mspecs; MonoMethodSignature *sig; MonoVTable *pinfo_vtable; int i; if (!System_Reflection_ParameterInfo_array) { MonoClass *klass; klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ParameterInfo"); mono_memory_barrier (); System_Reflection_ParameterInfo = klass; klass = mono_array_class_get (klass, 1); mono_memory_barrier (); System_Reflection_ParameterInfo_array = klass; } if (!mono_method_signature (method)->param_count) return mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), 0); /* Note: the cache is based on the address of the signature into the method * since we already cache MethodInfos with the method as keys. 
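 *
 * Callers normally use the public wrapper:
 *
 *   MonoArray *pinfos = mono_param_get_objects (domain, method);
 *
 * which simply forwards here with a NULL refclass.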
*/ CHECK_OBJECT (MonoArray*, &(method->signature), refclass); sig = mono_method_signature (method); member = mono_method_get_object (domain, method, refclass); names = g_new (char *, sig->param_count); mono_method_get_param_names (method, (const char **) names); mspecs = g_new (MonoMarshalSpec*, sig->param_count + 1); mono_method_get_marshal_info (method, mspecs); res = mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), sig->param_count); pinfo_vtable = mono_class_vtable (domain, System_Reflection_ParameterInfo); for (i = 0; i < sig->param_count; ++i) { param = (MonoReflectionParameter *)mono_object_new_specific (pinfo_vtable); MONO_OBJECT_SETREF (param, ClassImpl, mono_type_get_object (domain, sig->params [i])); MONO_OBJECT_SETREF (param, MemberImpl, (MonoObject*)member); MONO_OBJECT_SETREF (param, NameImpl, mono_string_new (domain, names [i])); param->PositionImpl = i; param->AttrsImpl = sig->params [i]->attrs; if (!(param->AttrsImpl & PARAM_ATTRIBUTE_HAS_DEFAULT)) { if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL) MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing)); else MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull)); } else { if (!blobs) { blobs = g_new0 (char *, sig->param_count); types = g_new0 (guint32, sig->param_count); get_default_param_value_blobs (method, blobs, types); } /* Build MonoType for the type from the Constant Table */ if (!type) type = g_new0 (MonoType, 1); type->type = types [i]; type->data.klass = NULL; if (types [i] == MONO_TYPE_CLASS) type->data.klass = mono_defaults.object_class; else if ((sig->params [i]->type == MONO_TYPE_VALUETYPE) && sig->params [i]->data.klass->enumtype) { /* For enums, types [i] contains the base type */ type->type = MONO_TYPE_VALUETYPE; type->data.klass = mono_class_from_mono_type (sig->params [i]); } else type->data.klass = mono_class_from_mono_type (type); MONO_OBJECT_SETREF (param, DefaultValueImpl, mono_get_object_from_blob (domain, type, blobs [i])); /* Type in the Constant table is MONO_TYPE_CLASS for nulls */ if (types [i] != MONO_TYPE_CLASS && !param->DefaultValueImpl) { if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL) MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing)); else MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull)); } } if (mspecs [i + 1]) MONO_OBJECT_SETREF (param, MarshalAsImpl, (MonoObject*)mono_reflection_marshal_from_marshal_spec (domain, method->klass, mspecs [i + 1])); mono_array_setref (res, i, param); } g_free (names); g_free (blobs); g_free (types); g_free (type); for (i = mono_method_signature (method)->param_count; i >= 0; i--) if (mspecs [i]) mono_metadata_free_marshal_spec (mspecs [i]); g_free (mspecs); CACHE_OBJECT (MonoArray *, &(method->signature), res, refclass); } MonoArray* mono_param_get_objects (MonoDomain *domain, MonoMethod *method) { return mono_param_get_objects_internal (domain, method, NULL); } /* * mono_method_body_get_object: * @domain: an app domain * @method: a method * * Return an System.Reflection.MethodBody object representing the method @method. 
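 * Returns NULL when the method has no IL body (pinvoke, abstract,
 * internal-call and runtime-provided methods); for dynamic (SRE) methods an
 * InvalidOperationException is raised instead, matching the .NET behaviour.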
*/ MonoReflectionMethodBody* mono_method_body_get_object (MonoDomain *domain, MonoMethod *method) { static MonoClass *System_Reflection_MethodBody = NULL; static MonoClass *System_Reflection_LocalVariableInfo = NULL; static MonoClass *System_Reflection_ExceptionHandlingClause = NULL; MonoReflectionMethodBody *ret; MonoMethodNormal *mn; MonoMethodHeader *header; MonoImage *image; guint32 method_rva, local_var_sig_token; char *ptr; unsigned char format, flags; int i; /* for compatibility with .net */ if (method->dynamic) mono_raise_exception (mono_get_exception_invalid_operation (NULL)); if (!System_Reflection_MethodBody) System_Reflection_MethodBody = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MethodBody"); if (!System_Reflection_LocalVariableInfo) System_Reflection_LocalVariableInfo = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "LocalVariableInfo"); if (!System_Reflection_ExceptionHandlingClause) System_Reflection_ExceptionHandlingClause = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ExceptionHandlingClause"); CHECK_OBJECT (MonoReflectionMethodBody *, method, NULL); if ((method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (method->flags & METHOD_ATTRIBUTE_ABSTRACT) || (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) return NULL; mn = (MonoMethodNormal *)method; image = method->klass->image; header = mono_method_get_header (method); if (!image->dynamic) { /* Obtain local vars signature token */ method_rva = mono_metadata_decode_row_col (&image->tables [MONO_TABLE_METHOD], mono_metadata_token_index (method->token) - 1, MONO_METHOD_RVA); ptr = mono_image_rva_map (image, method_rva); flags = *(const unsigned char *) ptr; format = flags & METHOD_HEADER_FORMAT_MASK; switch (format){ case METHOD_HEADER_TINY_FORMAT: local_var_sig_token = 0; break; case METHOD_HEADER_FAT_FORMAT: ptr += 2; ptr += 2; ptr += 4; local_var_sig_token = read32 (ptr); break; default: g_assert_not_reached (); } } else local_var_sig_token = 0; //FIXME ret = (MonoReflectionMethodBody*)mono_object_new (domain, System_Reflection_MethodBody); ret->init_locals = header->init_locals; ret->max_stack = header->max_stack; ret->local_var_sig_token = local_var_sig_token; MONO_OBJECT_SETREF (ret, il, mono_array_new_cached (domain, mono_defaults.byte_class, header->code_size)); memcpy (mono_array_addr (ret->il, guint8, 0), header->code, header->code_size); /* Locals */ MONO_OBJECT_SETREF (ret, locals, mono_array_new_cached (domain, System_Reflection_LocalVariableInfo, header->num_locals)); for (i = 0; i < header->num_locals; ++i) { MonoReflectionLocalVariableInfo *info = (MonoReflectionLocalVariableInfo*)mono_object_new (domain, System_Reflection_LocalVariableInfo); MONO_OBJECT_SETREF (info, local_type, mono_type_get_object (domain, header->locals [i])); info->is_pinned = header->locals [i]->pinned; info->local_index = i; mono_array_setref (ret->locals, i, info); } /* Exceptions */ MONO_OBJECT_SETREF (ret, clauses, mono_array_new_cached (domain, System_Reflection_ExceptionHandlingClause, header->num_clauses)); for (i = 0; i < header->num_clauses; ++i) { MonoReflectionExceptionHandlingClause *info = (MonoReflectionExceptionHandlingClause*)mono_object_new (domain, System_Reflection_ExceptionHandlingClause); MonoExceptionClause *clause = &header->clauses [i]; info->flags = clause->flags; info->try_offset = clause->try_offset; info->try_length = clause->try_len; info->handler_offset = clause->handler_offset; 
info->handler_length = clause->handler_len; if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) info->filter_offset = clause->data.filter_offset; else if (clause->data.catch_class) MONO_OBJECT_SETREF (info, catch_type, mono_type_get_object (mono_domain_get (), &clause->data.catch_class->byval_arg)); mono_array_setref (ret->clauses, i, info); } CACHE_OBJECT (MonoReflectionMethodBody *, method, ret, NULL); return ret; } /** * mono_get_dbnull_object: * @domain: Domain where the object lives * * Returns the System.DBNull.Value singleton object * * Used as the value for ParameterInfo.DefaultValue */ MonoObject * mono_get_dbnull_object (MonoDomain *domain) { MonoObject *obj; static MonoClassField *dbnull_value_field = NULL; if (!dbnull_value_field) { MonoClass *dbnull_klass; dbnull_klass = mono_class_from_name (mono_defaults.corlib, "System", "DBNull"); mono_class_init (dbnull_klass); dbnull_value_field = mono_class_get_field_from_name (dbnull_klass, "Value"); g_assert (dbnull_value_field); } obj = mono_field_get_value_object (domain, dbnull_value_field, NULL); g_assert (obj); return obj; } static void get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types) { guint32 param_index, i, lastp, crow = 0; guint32 param_cols [MONO_PARAM_SIZE], const_cols [MONO_CONSTANT_SIZE]; gint32 idx; MonoClass *klass = method->klass; MonoImage *image = klass->image; MonoMethodSignature *methodsig = mono_method_signature (method); MonoTableInfo *constt; MonoTableInfo *methodt; MonoTableInfo *paramt; if (!methodsig->param_count) return; mono_class_init (klass); if (klass->image->dynamic) { MonoReflectionMethodAux *aux; if (method->is_inflated) method = ((MonoMethodInflated*)method)->declaring; aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method); if (aux && aux->param_defaults) { memcpy (blobs, &(aux->param_defaults [1]), methodsig->param_count * sizeof (char*)); memcpy (types, &(aux->param_default_types [1]), methodsig->param_count * sizeof (guint32)); } return; } methodt = &klass->image->tables [MONO_TABLE_METHOD]; paramt = &klass->image->tables [MONO_TABLE_PARAM]; constt = &image->tables [MONO_TABLE_CONSTANT]; idx = mono_method_get_index (method) - 1; g_assert (idx != -1); param_index = mono_metadata_decode_row_col (methodt, idx, MONO_METHOD_PARAMLIST); if (idx + 1 < methodt->rows) lastp = mono_metadata_decode_row_col (methodt, idx + 1, MONO_METHOD_PARAMLIST); else lastp = paramt->rows + 1; for (i = param_index; i < lastp; ++i) { guint32 paramseq; mono_metadata_decode_row (paramt, i - 1, param_cols, MONO_PARAM_SIZE); paramseq = param_cols [MONO_PARAM_SEQUENCE]; if (!(param_cols [MONO_PARAM_FLAGS] & PARAM_ATTRIBUTE_HAS_DEFAULT)) continue; crow = mono_metadata_get_constant_index (image, MONO_TOKEN_PARAM_DEF | i, crow + 1); if (!crow) { continue; } mono_metadata_decode_row (constt, crow - 1, const_cols, MONO_CONSTANT_SIZE); blobs [paramseq - 1] = (gpointer) mono_metadata_blob_heap (image, const_cols [MONO_CONSTANT_VALUE]); types [paramseq - 1] = const_cols [MONO_CONSTANT_TYPE]; } return; } static MonoObject * mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob) { void *retval; MonoClass *klass; MonoObject *object; MonoType *basetype = type; if (!blob) return NULL; klass = mono_class_from_mono_type (type); if (klass->valuetype) { object = mono_object_new (domain, klass); retval = ((gchar *) object + sizeof (MonoObject)); if (klass->enumtype) basetype = mono_class_enum_basetype (klass); } else { retval = &object; } if 
(!mono_get_constant_value_from_blob (domain, basetype->type, blob, retval)) return object; else return NULL; } static int assembly_name_to_aname (MonoAssemblyName *assembly, char *p) { int found_sep; char *s; memset (assembly, 0, sizeof (MonoAssemblyName)); assembly->name = p; assembly->culture = ""; memset (assembly->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH); while (*p && (isalnum (*p) || *p == '.' || *p == '-' || *p == '_' || *p == '$' || *p == '@')) p++; found_sep = 0; while (g_ascii_isspace (*p) || *p == ',') { *p++ = 0; found_sep = 1; continue; } /* failed */ if (!found_sep) return 1; while (*p) { if (*p == 'V' && g_ascii_strncasecmp (p, "Version=", 8) == 0) { p += 8; assembly->major = strtoul (p, &s, 10); if (s == p || *s != '.') return 1; p = ++s; assembly->minor = strtoul (p, &s, 10); if (s == p || *s != '.') return 1; p = ++s; assembly->build = strtoul (p, &s, 10); if (s == p || *s != '.') return 1; p = ++s; assembly->revision = strtoul (p, &s, 10); if (s == p) return 1; p = s; } else if (*p == 'C' && g_ascii_strncasecmp (p, "Culture=", 8) == 0) { p += 8; if (g_ascii_strncasecmp (p, "neutral", 7) == 0) { assembly->culture = ""; p += 7; } else { assembly->culture = p; while (*p && *p != ',') { p++; } } } else if (*p == 'P' && g_ascii_strncasecmp (p, "PublicKeyToken=", 15) == 0) { p += 15; if (strncmp (p, "null", 4) == 0) { p += 4; } else { int len; gchar *start = p; while (*p && *p != ',') { p++; } len = (p - start + 1); if (len > MONO_PUBLIC_KEY_TOKEN_LENGTH) len = MONO_PUBLIC_KEY_TOKEN_LENGTH; g_strlcpy ((char*)assembly->public_key_token, start, len); } } else { while (*p && *p != ',') p++; } found_sep = 0; while (g_ascii_isspace (*p) || *p == ',') { *p++ = 0; found_sep = 1; continue; } /* failed */ if (!found_sep) return 1; } return 0; } /* * mono_reflection_parse_type: * @name: type name * * Parse a type name as accepted by the GetType () method and output the info * extracted in the info structure. * the name param will be mangled, so, make a copy before passing it to this function. * The fields in info will be valid until the memory pointed to by name is valid. * * See also mono_type_get_name () below. * * Returns: 0 on parse error. */ static int _mono_reflection_parse_type (char *name, char **endptr, gboolean is_recursed, MonoTypeNameParse *info) { char *start, *p, *w, *temp, *last_point, *startn; int in_modifiers = 0; int isbyref = 0, rank, arity = 0, i; start = p = w = name; //FIXME could we just zero the whole struct? 
	/* memset (&info, 0, sizeof (MonoTypeNameParse)) */
	memset (&info->assembly, 0, sizeof (MonoAssemblyName));
	info->name = info->name_space = NULL;
	info->nested = NULL;
	info->modifiers = NULL;
	info->type_arguments = NULL;

	/* last_point separates the namespace from the name */
	last_point = NULL;
	/* Skips spaces */
	while (*p == ' ') p++, start++, w++, name++;

	while (*p) {
		switch (*p) {
		case '+':
			*p = 0; /* NULL terminate the name */
			startn = p + 1;
			info->nested = g_list_append (info->nested, startn);
			/* we have parsed the nesting namespace + name */
			if (info->name)
				break;
			if (last_point) {
				info->name_space = start;
				*last_point = 0;
				info->name = last_point + 1;
			} else {
				info->name_space = (char *)"";
				info->name = start;
			}
			break;
		case '.':
			last_point = p;
			break;
		case '\\':
			++p;
			break;
		case '&':
		case '*':
		case '[':
		case ',':
		case ']':
			in_modifiers = 1;
			break;
		case '`':
			++p;
			i = strtol (p, &temp, 10);
			arity += i;
			if (p == temp)
				return 0;
			p = temp-1;
			break;
		default:
			break;
		}
		if (in_modifiers)
			break;
		// *w++ = *p++;
		p++;
	}

	if (!info->name) {
		if (last_point) {
			info->name_space = start;
			*last_point = 0;
			info->name = last_point + 1;
		} else {
			info->name_space = (char *)"";
			info->name = start;
		}
	}
	while (*p) {
		switch (*p) {
		case '&':
			if (isbyref) /* only one level allowed by the spec */
				return 0;
			isbyref = 1;
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (0));
			*p++ = 0;
			break;
		case '*':
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-1));
			*p++ = 0;
			break;
		case '[':
			if (arity != 0) {
				*p++ = 0;
				info->type_arguments = g_ptr_array_new ();
				for (i = 0; i < arity; i++) {
					MonoTypeNameParse *subinfo = g_new0 (MonoTypeNameParse, 1);
					gboolean fqname = FALSE;

					g_ptr_array_add (info->type_arguments, subinfo);

					if (*p == '[') {
						p++;
						fqname = TRUE;
					}

					if (!_mono_reflection_parse_type (p, &p, TRUE, subinfo))
						return 0;

					/*MS is lenient on [] delimited parameters that aren't fqn - and F# uses them.*/
					if (fqname && (*p != ']')) {
						char *aname;

						if (*p != ',')
							return 0;
						*p++ = 0;

						aname = p;
						while (*p && (*p != ']'))
							p++;

						if (*p != ']')
							return 0;

						*p++ = 0;
						while (*aname) {
							if (g_ascii_isspace (*aname)) {
								++aname;
								continue;
							}
							break;
						}

						if (!*aname || !assembly_name_to_aname (&subinfo->assembly, aname))
							return 0;
					} else if (fqname && (*p == ']')) {
						*p++ = 0;
					}

					if (i + 1 < arity) {
						if (*p != ',')
							return 0;
					} else {
						if (*p != ']')
							return 0;
					}
					*p++ = 0;
				}

				arity = 0;
				break;
			}
			rank = 1;
			*p++ = 0;
			while (*p) {
				if (*p == ']')
					break;
				if (*p == ',')
					rank++;
				else if (*p == '*') /* '*' means unknown lower bound */
					info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-2));
				else
					return 0;
				++p;
			}
			if (*p++ != ']')
				return 0;
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (rank));
			break;
		case ']':
			if (is_recursed)
				goto end;
			return 0;
		case ',':
			if (is_recursed)
				goto end;
			*p++ = 0;
			while (*p) {
				if (g_ascii_isspace (*p)) {
					++p;
					continue;
				}
				break;
			}
			if (!*p)
				return 0; /* missing assembly name */
			if (!assembly_name_to_aname (&info->assembly, p))
				return 0;
			break;
		default:
			return 0;
		}
		if (info->assembly.name)
			break;
	}
	// *w = 0; /* terminate class name */
 end:
	if (!info->name || !*info->name)
		return 0;
	if (endptr)
		*endptr = p;
	/* add other consistency checks */
	return 1;
}

int
mono_reflection_parse_type (char *name, MonoTypeNameParse *info)
{
	return _mono_reflection_parse_type (name, NULL, FALSE, info);
}

static MonoType*
_mono_reflection_get_type_from_info (MonoTypeNameParse *info, MonoImage *image, gboolean ignorecase)
{
	gboolean type_resolve = FALSE;
	MonoType *type;
	MonoImage
*rootimage = image; if (info->assembly.name) { MonoAssembly *assembly = mono_assembly_loaded (&info->assembly); if (!assembly && image && image->assembly && mono_assembly_names_equal (&info->assembly, &image->assembly->aname)) /* * This could happen in the AOT compiler case when the search hook is not * installed. */ assembly = image->assembly; if (!assembly) { /* then we must load the assembly ourselve - see #60439 */ assembly = mono_assembly_load (&info->assembly, NULL, NULL); if (!assembly) return NULL; } image = assembly->image; } else if (!image) { image = mono_defaults.corlib; } type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve); if (type == NULL && !info->assembly.name && image != mono_defaults.corlib) { image = mono_defaults.corlib; type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve); } return type; } static MonoType* mono_reflection_get_type_internal (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase) { MonoClass *klass; GList *mod; int modval; gboolean bounded = FALSE; if (!image) image = mono_defaults.corlib; if (ignorecase) klass = mono_class_from_name_case (image, info->name_space, info->name); else klass = mono_class_from_name (image, info->name_space, info->name); if (!klass) return NULL; for (mod = info->nested; mod; mod = mod->next) { gpointer iter = NULL; MonoClass *parent; parent = klass; mono_class_init (parent); while ((klass = mono_class_get_nested_types (parent, &iter))) { if (ignorecase) { if (mono_utf8_strcasecmp (klass->name, mod->data) == 0) break; } else { if (strcmp (klass->name, mod->data) == 0) break; } } if (!klass) break; } if (!klass) return NULL; mono_class_init (klass); if (info->type_arguments) { MonoType **type_args = g_new0 (MonoType *, info->type_arguments->len); MonoReflectionType *the_type; MonoType *instance; int i; for (i = 0; i < info->type_arguments->len; i++) { MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i); type_args [i] = _mono_reflection_get_type_from_info (subinfo, rootimage, ignorecase); if (!type_args [i]) { g_free (type_args); return NULL; } } the_type = mono_type_get_object (mono_domain_get (), &klass->byval_arg); instance = mono_reflection_bind_generic_parameters ( the_type, info->type_arguments->len, type_args); g_free (type_args); if (!instance) return NULL; klass = mono_class_from_mono_type (instance); } for (mod = info->modifiers; mod; mod = mod->next) { modval = GPOINTER_TO_UINT (mod->data); if (!modval) { /* byref: must be last modifier */ return &klass->this_arg; } else if (modval == -1) { klass = mono_ptr_class_get (&klass->byval_arg); } else if (modval == -2) { bounded = TRUE; } else { /* array rank */ klass = mono_bounded_array_class_get (klass, modval, bounded); } mono_class_init (klass); } return &klass->byval_arg; } /* * mono_reflection_get_type: * @image: a metadata context * @info: type description structure * @ignorecase: flag for case-insensitive string compares * @type_resolve: whenever type resolve was already tried * * Build a MonoType from the type description in @info. 
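 * The lookup walks the nested-type names, generic arguments and
 * array/byref/pointer modifiers recorded in @info; if nothing is found, the
 * AppDomain's TypeResolve handlers are consulted at most once (tracked
 * through @type_resolve).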
* */ MonoType* mono_reflection_get_type (MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve) { return mono_reflection_get_type_with_rootimage(image, image, info, ignorecase, type_resolve); } static MonoType* mono_reflection_get_type_internal_dynamic (MonoImage *rootimage, MonoAssembly *assembly, MonoTypeNameParse *info, gboolean ignorecase) { MonoReflectionAssemblyBuilder *abuilder; MonoType *type; int i; g_assert (assembly->dynamic); abuilder = (MonoReflectionAssemblyBuilder*)mono_assembly_get_object (((MonoDynamicAssembly*)assembly)->domain, assembly); /* Enumerate all modules */ type = NULL; if (abuilder->modules) { for (i = 0; i < mono_array_length (abuilder->modules); ++i) { MonoReflectionModuleBuilder *mb = mono_array_get (abuilder->modules, MonoReflectionModuleBuilder*, i); type = mono_reflection_get_type_internal (rootimage, &mb->dynamic_image->image, info, ignorecase); if (type) break; } } if (!type && abuilder->loaded_modules) { for (i = 0; i < mono_array_length (abuilder->loaded_modules); ++i) { MonoReflectionModule *mod = mono_array_get (abuilder->loaded_modules, MonoReflectionModule*, i); type = mono_reflection_get_type_internal (rootimage, mod->image, info, ignorecase); if (type) break; } } return type; } MonoType* mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve) { MonoType *type; MonoReflectionAssembly *assembly; GString *fullName; GList *mod; if (image && image->dynamic) type = mono_reflection_get_type_internal_dynamic (rootimage, image->assembly, info, ignorecase); else type = mono_reflection_get_type_internal (rootimage, image, info, ignorecase); if (type) return type; if (!mono_domain_has_type_resolve (mono_domain_get ())) return NULL; if (type_resolve) { if (*type_resolve) return NULL; else *type_resolve = TRUE; } /* Reconstruct the type name */ fullName = g_string_new (""); if (info->name_space && (info->name_space [0] != '\0')) g_string_printf (fullName, "%s.%s", info->name_space, info->name); else g_string_printf (fullName, "%s", info->name); for (mod = info->nested; mod; mod = mod->next) g_string_append_printf (fullName, "+%s", (char*)mod->data); assembly = mono_domain_try_type_resolve ( mono_domain_get (), fullName->str, NULL); if (assembly) { if (assembly->assembly->dynamic) type = mono_reflection_get_type_internal_dynamic (rootimage, assembly->assembly, info, ignorecase); else type = mono_reflection_get_type_internal (rootimage, assembly->assembly->image, info, ignorecase); } g_string_free (fullName, TRUE); return type; } void mono_reflection_free_type_info (MonoTypeNameParse *info) { g_list_free (info->modifiers); g_list_free (info->nested); if (info->type_arguments) { int i; for (i = 0; i < info->type_arguments->len; i++) { MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i); mono_reflection_free_type_info (subinfo); /*We free the subinfo since it is allocated by _mono_reflection_parse_type*/ g_free (subinfo); } g_ptr_array_free (info->type_arguments, TRUE); } } /* * mono_reflection_type_from_name: * @name: type name. * @image: a metadata context (can be NULL). * * Retrieves a MonoType from its @name. If the name is not fully qualified, * it defaults to get the type from @image or, if @image is NULL or loading * from it fails, uses corlib. 
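 *
 * A minimal usage sketch (illustrative only; the function duplicates @name
 * internally before parsing, so the caller's buffer is left untouched):
 *
 *   char buf [] = "System.Collections.Generic.List`1[[System.Int32, mscorlib]]";
 *   MonoType *t = mono_reflection_type_from_name (buf, NULL);
 *
 * Nested types ('+'), pointer/byref/array suffixes and a trailing
 * ", AssemblyName, Version=..." qualifier are accepted as well.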
* */ MonoType* mono_reflection_type_from_name (char *name, MonoImage *image) { MonoType *type = NULL; MonoTypeNameParse info; char *tmp; /* Make a copy since parse_type modifies its argument */ tmp = g_strdup (name); /*g_print ("requested type %s\n", str);*/ if (mono_reflection_parse_type (tmp, &info)) { type = _mono_reflection_get_type_from_info (&info, image, FALSE); } g_free (tmp); mono_reflection_free_type_info (&info); return type; } /* * mono_reflection_get_token: * * Return the metadata token of OBJ which should be an object * representing a metadata element. */ guint32 mono_reflection_get_token (MonoObject *obj) { MonoClass *klass; guint32 token = 0; klass = obj->vtable->klass; if (strcmp (klass->name, "MethodBuilder") == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj; token = mb->table_idx | MONO_TOKEN_METHOD_DEF; } else if (strcmp (klass->name, "ConstructorBuilder") == 0) { MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj; token = mb->table_idx | MONO_TOKEN_METHOD_DEF; } else if (strcmp (klass->name, "FieldBuilder") == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj; /* Call mono_image_create_token so the object gets added to the tokens hash table */ token = mono_image_create_token (((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image, obj, FALSE, TRUE); } else if (strcmp (klass->name, "TypeBuilder") == 0) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj; token = tb->table_idx | MONO_TOKEN_TYPE_DEF; } else if (strcmp (klass->name, "MonoType") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); token = mono_class_from_mono_type (type)->type_token; } else if (strcmp (klass->name, "MonoCMethod") == 0 || strcmp (klass->name, "MonoMethod") == 0 || strcmp (klass->name, "MonoGenericMethod") == 0 || strcmp (klass->name, "MonoGenericCMethod") == 0) { MonoReflectionMethod *m = (MonoReflectionMethod *)obj; if (m->method->is_inflated) { MonoMethodInflated *inflated = (MonoMethodInflated *) m->method; return inflated->declaring->token; } else { token = m->method->token; } } else if (strcmp (klass->name, "MonoField") == 0) { MonoReflectionField *f = (MonoReflectionField*)obj; if (is_field_on_inst (f->field)) { MonoDynamicGenericClass *dgclass = (MonoDynamicGenericClass*)f->field->parent->generic_class; int field_index = f->field - dgclass->fields; MonoObject *obj; g_assert (field_index >= 0 && field_index < dgclass->count_fields); obj = dgclass->field_objects [field_index]; return mono_reflection_get_token (obj); } token = mono_class_get_field_token (f->field); } else if (strcmp (klass->name, "MonoProperty") == 0) { MonoReflectionProperty *p = (MonoReflectionProperty*)obj; token = mono_class_get_property_token (p->property); } else if (strcmp (klass->name, "MonoEvent") == 0) { MonoReflectionMonoEvent *p = (MonoReflectionMonoEvent*)obj; token = mono_class_get_event_token (p->event); } else if (strcmp (klass->name, "ParameterInfo") == 0) { MonoReflectionParameter *p = (MonoReflectionParameter*)obj; MonoClass *member_class = mono_object_class (p->MemberImpl); g_assert (mono_class_is_reflection_method_or_constructor (member_class)); token = mono_method_get_param_token (((MonoReflectionMethod*)p->MemberImpl)->method, p->PositionImpl); } else if (strcmp (klass->name, "Module") == 0) { MonoReflectionModule *m = (MonoReflectionModule*)obj; token = m->token; } else if (strcmp (klass->name, "Assembly") == 0) { token = mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1); } else { 
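		/* Not a reflection wrapper we know how to map back to a metadata
		 * token: surface a NotImplementedException to managed code. */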
gchar *msg = g_strdup_printf ("MetadataToken is not supported for type '%s.%s'", klass->name_space, klass->name); MonoException *ex = mono_get_exception_not_implemented (msg); g_free (msg); mono_raise_exception (ex); } return token; } static void* load_cattr_value (MonoImage *image, MonoType *t, const char *p, const char **end) { int slen, type = t->type; MonoClass *tklass = t->data.klass; handle_enum: switch (type) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: { MonoBoolean *bval = g_malloc (sizeof (MonoBoolean)); *bval = *p; *end = p + 1; return bval; } case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: { guint16 *val = g_malloc (sizeof (guint16)); *val = read16 (p); *end = p + 2; return val; } #if SIZEOF_VOID_P == 4 case MONO_TYPE_U: case MONO_TYPE_I: #endif case MONO_TYPE_R4: case MONO_TYPE_U4: case MONO_TYPE_I4: { guint32 *val = g_malloc (sizeof (guint32)); *val = read32 (p); *end = p + 4; return val; } #if SIZEOF_VOID_P == 8 case MONO_TYPE_U: /* error out instead? this should probably not happen */ case MONO_TYPE_I: #endif case MONO_TYPE_U8: case MONO_TYPE_I8: { guint64 *val = g_malloc (sizeof (guint64)); *val = read64 (p); *end = p + 8; return val; } case MONO_TYPE_R8: { double *val = g_malloc (sizeof (double)); readr8 (p, val); *end = p + 8; return val; } case MONO_TYPE_VALUETYPE: if (t->data.klass->enumtype) { type = mono_class_enum_basetype (t->data.klass)->type; goto handle_enum; } else { g_error ("generic valutype %s not handled in custom attr value decoding", t->data.klass->name); } break; case MONO_TYPE_STRING: if (*p == (char)0xFF) { *end = p + 1; return NULL; } slen = mono_metadata_decode_value (p, &p); *end = p + slen; return mono_string_new_len (mono_domain_get (), p, slen); case MONO_TYPE_CLASS: { char *n; MonoType *t; if (*p == (char)0xFF) { *end = p + 1; return NULL; } handle_type: slen = mono_metadata_decode_value (p, &p); n = g_memdup (p, slen + 1); n [slen] = 0; t = mono_reflection_type_from_name (n, image); if (!t) g_warning ("Cannot load type '%s'", n); g_free (n); *end = p + slen; if (t) return mono_type_get_object (mono_domain_get (), t); else return NULL; } case MONO_TYPE_OBJECT: { char subt = *p++; MonoObject *obj; MonoClass *subc = NULL; void *val; if (subt == 0x50) { goto handle_type; } else if (subt == 0x0E) { type = MONO_TYPE_STRING; goto handle_enum; } else if (subt == 0x1D) { MonoType simple_type = {{0}}; int etype = *p; p ++; if (etype == 0x51) /* See Partition II, Appendix B3 */ etype = MONO_TYPE_OBJECT; type = MONO_TYPE_SZARRAY; simple_type.type = etype; tklass = mono_class_from_mono_type (&simple_type); goto handle_enum; } else if (subt == 0x55) { char *n; MonoType *t; slen = mono_metadata_decode_value (p, &p); n = g_memdup (p, slen + 1); n [slen] = 0; t = mono_reflection_type_from_name (n, image); if (!t) g_error ("Cannot load type '%s'", n); g_free (n); p += slen; subc = mono_class_from_mono_type (t); } else if (subt >= MONO_TYPE_BOOLEAN && subt <= MONO_TYPE_R8) { MonoType simple_type = {{0}}; simple_type.type = subt; subc = mono_class_from_mono_type (&simple_type); } else { g_error ("Unknown type 0x%02x for object type encoding in custom attr", subt); } val = load_cattr_value (image, &subc->byval_arg, p, end); obj = mono_object_new (mono_domain_get (), subc); memcpy ((char*)obj + sizeof (MonoObject), val, mono_class_value_size (subc, NULL)); g_free (val); return obj; } case MONO_TYPE_SZARRAY: { MonoArray *arr; guint32 i, alen, basetype; alen = read32 (p); p += 4; if (alen == 0xffffffff) { *end = p; return NULL; } arr = 
mono_array_new (mono_domain_get(), tklass, alen); basetype = tklass->byval_arg.type; if (basetype == MONO_TYPE_VALUETYPE && tklass->enumtype) basetype = mono_class_enum_basetype (tklass)->type; switch (basetype) { case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_BOOLEAN: for (i = 0; i < alen; i++) { MonoBoolean val = *p++; mono_array_set (arr, MonoBoolean, i, val); } break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: for (i = 0; i < alen; i++) { guint16 val = read16 (p); mono_array_set (arr, guint16, i, val); p += 2; } break; case MONO_TYPE_R4: case MONO_TYPE_U4: case MONO_TYPE_I4: for (i = 0; i < alen; i++) { guint32 val = read32 (p); mono_array_set (arr, guint32, i, val); p += 4; } break; case MONO_TYPE_R8: for (i = 0; i < alen; i++) { double val; readr8 (p, &val); mono_array_set (arr, double, i, val); p += 8; } break; case MONO_TYPE_U8: case MONO_TYPE_I8: for (i = 0; i < alen; i++) { guint64 val = read64 (p); mono_array_set (arr, guint64, i, val); p += 8; } break; case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: for (i = 0; i < alen; i++) { MonoObject *item = load_cattr_value (image, &tklass->byval_arg, p, &p); mono_array_setref (arr, i, item); } break; default: g_error ("Type 0x%02x not handled in custom attr array decoding", basetype); } *end=p; return arr; } default: g_error ("Type 0x%02x not handled in custom attr value decoding", type); } return NULL; } static MonoObject* create_cattr_typed_arg (MonoType *t, MonoObject *val) { static MonoClass *klass; static MonoMethod *ctor; MonoObject *retval; void *params [2], *unboxed; if (!klass) klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeTypedArgument"); if (!ctor) ctor = mono_class_get_method_from_name (klass, ".ctor", 2); params [0] = mono_type_get_object (mono_domain_get (), t); params [1] = val; retval = mono_object_new (mono_domain_get (), klass); unboxed = mono_object_unbox (retval); mono_runtime_invoke (ctor, unboxed, params, NULL); return retval; } static MonoObject* create_cattr_named_arg (void *minfo, MonoObject *typedarg) { static MonoClass *klass; static MonoMethod *ctor; MonoObject *retval; void *unboxed, *params [2]; if (!klass) klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeNamedArgument"); if (!ctor) ctor = mono_class_get_method_from_name (klass, ".ctor", 2); params [0] = minfo; params [1] = typedarg; retval = mono_object_new (mono_domain_get (), klass); unboxed = mono_object_unbox (retval); mono_runtime_invoke (ctor, unboxed, params, NULL); return retval; } static gboolean type_is_reference (MonoType *type) { switch (type->type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_U: case MONO_TYPE_I: case MONO_TYPE_U1: case MONO_TYPE_I1: case MONO_TYPE_U2: case MONO_TYPE_I2: case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_U8: case MONO_TYPE_I8: case MONO_TYPE_R8: case MONO_TYPE_R4: case MONO_TYPE_VALUETYPE: return FALSE; default: return TRUE; } } static void free_param_data (MonoMethodSignature *sig, void **params) { int i; for (i = 0; i < sig->param_count; ++i) { if (!type_is_reference (sig->params [i])) g_free (params [i]); } } /* * Find the field index in the metadata FieldDef table. */ static guint32 find_field_index (MonoClass *klass, MonoClassField *field) { int i; for (i = 0; i < klass->field.count; ++i) { if (field == &klass->fields [i]) return klass->field.first + 1 + i; } return 0; } /* * Find the property index in the metadata Property table. 
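 * As with find_field_index () above, the result is a 1-based row index; the
 * mono_custom_attrs_from_* helpers below shift it by MONO_CUSTOM_ATTR_BITS
 * and tag it to form the custom-attribute lookup key.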
*/ static guint32 find_property_index (MonoClass *klass, MonoProperty *property) { int i; for (i = 0; i < klass->ext->property.count; ++i) { if (property == &klass->ext->properties [i]) return klass->ext->property.first + 1 + i; } return 0; } /* * Find the event index in the metadata Event table. */ static guint32 find_event_index (MonoClass *klass, MonoEvent *event) { int i; for (i = 0; i < klass->ext->event.count; ++i) { if (event == &klass->ext->events [i]) return klass->ext->event.first + 1 + i; } return 0; } static MonoObject* create_custom_attr (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len) { const char *p = (const char*)data; const char *named; guint32 i, j, num_named; MonoObject *attr; void *params_buf [32]; void **params; MonoMethodSignature *sig; mono_class_init (method->klass); if (len == 0) { attr = mono_object_new (mono_domain_get (), method->klass); mono_runtime_invoke (method, attr, NULL, NULL); return attr; } if (len < 2 || read16 (p) != 0x0001) /* Prolog */ return NULL; /*g_print ("got attr %s\n", method->klass->name);*/ sig = mono_method_signature (method); if (sig->param_count < 32) params = params_buf; else /* Allocate using GC so it gets GC tracking */ params = mono_gc_alloc_fixed (sig->param_count * sizeof (void*), NULL); /* skip prolog */ p += 2; for (i = 0; i < mono_method_signature (method)->param_count; ++i) { params [i] = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p); } named = p; attr = mono_object_new (mono_domain_get (), method->klass); mono_runtime_invoke (method, attr, params, NULL); free_param_data (method->signature, params); num_named = read16 (named); named += 2; for (j = 0; j < num_named; j++) { gint name_len; char *name, named_type, data_type; named_type = *named++; data_type = *named++; /* type of data */ if (data_type == MONO_TYPE_SZARRAY) data_type = *named++; if (data_type == MONO_TYPE_ENUM) { gint type_len; char *type_name; type_len = mono_metadata_decode_blob_size (named, &named); type_name = g_malloc (type_len + 1); memcpy (type_name, named, type_len); type_name [type_len] = 0; named += type_len; /* FIXME: lookup the type and check type consistency */ g_free (type_name); } name_len = mono_metadata_decode_blob_size (named, &named); name = g_malloc (name_len + 1); memcpy (name, named, name_len); name [name_len] = 0; named += name_len; if (named_type == 0x53) { MonoClassField *field = mono_class_get_field_from_name (mono_object_class (attr), name); void *val = load_cattr_value (image, field->type, named, &named); mono_field_set_value (attr, field, val); if (!type_is_reference (field->type)) g_free (val); } else if (named_type == 0x54) { MonoProperty *prop; void *pparams [1]; MonoType *prop_type; prop = mono_class_get_property_from_name (mono_object_class (attr), name); /* can we have more that 1 arg in a custom attr named property? */ prop_type = prop->get? mono_method_signature (prop->get)->ret : mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1]; pparams [0] = load_cattr_value (image, prop_type, named, &named); mono_property_set_value (prop, attr, pparams, NULL); if (!type_is_reference (prop_type)) g_free (pparams [0]); } g_free (name); } if (params != params_buf) mono_gc_free_fixed (params); return attr; } /* * mono_reflection_create_custom_attr_data_args: * * Create an array of typed and named arguments from the cattr blob given by DATA. 
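 *
 * The blob decoded here follows the custom-attribute encoding of ECMA-335
 * (partition II, 23.3), roughly:
 *
 *   u16 0x0001                      prolog
 *   <fixed args>                    one value per constructor parameter
 *   u16 num_named
 *   num_named times:
 *     u8  0x53 (field) or 0x54 (property)
 *     u8  element type (plus extra data for enums and SZARRAYs)
 *     SerString  member name
 *     <value>
 *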
* TYPED_ARGS and NAMED_ARGS will contain the objects representing the arguments, * NAMED_ARG_INFO will contain information about the named arguments. */ void mono_reflection_create_custom_attr_data_args (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len, MonoArray **typed_args, MonoArray **named_args, CattrNamedArg **named_arg_info) { MonoArray *typedargs, *namedargs; MonoClass *attrklass; MonoDomain *domain; const char *p = (const char*)data; const char *named; guint32 i, j, num_named; CattrNamedArg *arginfo = NULL; mono_class_init (method->klass); *typed_args = NULL; *named_args = NULL; *named_arg_info = NULL; domain = mono_domain_get (); if (len < 2 || read16 (p) != 0x0001) /* Prolog */ return; typedargs = mono_array_new (domain, mono_get_object_class (), mono_method_signature (method)->param_count); /* skip prolog */ p += 2; for (i = 0; i < mono_method_signature (method)->param_count; ++i) { MonoObject *obj; void *val; val = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p); obj = type_is_reference (mono_method_signature (method)->params [i]) ? val : mono_value_box (domain, mono_class_from_mono_type (mono_method_signature (method)->params [i]), val); mono_array_setref (typedargs, i, obj); if (!type_is_reference (mono_method_signature (method)->params [i])) g_free (val); } named = p; num_named = read16 (named); namedargs = mono_array_new (domain, mono_get_object_class (), num_named); named += 2; attrklass = method->klass; arginfo = g_new0 (CattrNamedArg, num_named); *named_arg_info = arginfo; for (j = 0; j < num_named; j++) { gint name_len; char *name, named_type, data_type; named_type = *named++; data_type = *named++; /* type of data */ if (data_type == MONO_TYPE_SZARRAY) data_type = *named++; if (data_type == MONO_TYPE_ENUM) { gint type_len; char *type_name; type_len = mono_metadata_decode_blob_size (named, &named); type_name = g_malloc (type_len + 1); memcpy (type_name, named, type_len); type_name [type_len] = 0; named += type_len; /* FIXME: lookup the type and check type consistency */ g_free (type_name); } name_len = mono_metadata_decode_blob_size (named, &named); name = g_malloc (name_len + 1); memcpy (name, named, name_len); name [name_len] = 0; named += name_len; if (named_type == 0x53) { MonoObject *obj; MonoClassField *field = mono_class_get_field_from_name (attrklass, name); void *val; arginfo [j].type = field->type; arginfo [j].field = field; val = load_cattr_value (image, field->type, named, &named); obj = type_is_reference (field->type) ? val : mono_value_box (domain, mono_class_from_mono_type (field->type), val); mono_array_setref (namedargs, j, obj); if (!type_is_reference (field->type)) g_free (val); } else if (named_type == 0x54) { MonoObject *obj; MonoType *prop_type; MonoProperty *prop = mono_class_get_property_from_name (attrklass, name); void *val; prop_type = prop->get? mono_method_signature (prop->get)->ret : mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1]; arginfo [j].type = prop_type; arginfo [j].prop = prop; val = load_cattr_value (image, prop_type, named, &named); obj = type_is_reference (prop_type) ? 
val : mono_value_box (domain, mono_class_from_mono_type (prop_type), val); mono_array_setref (namedargs, j, obj); if (!type_is_reference (prop_type)) g_free (val); } g_free (name); } *typed_args = typedargs; *named_args = namedargs; } static MonoObject* create_custom_attr_data (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len) { MonoArray *typedargs, *namedargs; static MonoMethod *ctor; MonoDomain *domain; MonoObject *attr; void *params [3]; CattrNamedArg *arginfo; int i; mono_class_init (method->klass); if (!ctor) ctor = mono_class_get_method_from_name (mono_defaults.customattribute_data_class, ".ctor", 3); domain = mono_domain_get (); if (len == 0) { /* This is for Attributes with no parameters */ attr = mono_object_new (domain, mono_defaults.customattribute_data_class); params [0] = mono_method_get_object (domain, method, NULL); params [1] = params [2] = NULL; mono_runtime_invoke (method, attr, params, NULL); return attr; } mono_reflection_create_custom_attr_data_args (image, method, data, len, &typedargs, &namedargs, &arginfo); if (!typedargs || !namedargs) return NULL; for (i = 0; i < mono_method_signature (method)->param_count; ++i) { MonoObject *obj = mono_array_get (typedargs, MonoObject*, i); MonoObject *typedarg; typedarg = create_cattr_typed_arg (mono_method_signature (method)->params [i], obj); mono_array_setref (typedargs, i, typedarg); } for (i = 0; i < mono_array_length (namedargs); ++i) { MonoObject *obj = mono_array_get (namedargs, MonoObject*, i); MonoObject *typedarg, *namedarg, *minfo; if (arginfo [i].prop) minfo = (MonoObject*)mono_property_get_object (domain, NULL, arginfo [i].prop); else minfo = (MonoObject*)mono_field_get_object (domain, NULL, arginfo [i].field); typedarg = create_cattr_typed_arg (arginfo [i].type, obj); namedarg = create_cattr_named_arg (minfo, typedarg); mono_array_setref (namedargs, i, namedarg); } attr = mono_object_new (domain, mono_defaults.customattribute_data_class); params [0] = mono_method_get_object (domain, method, NULL); params [1] = typedargs; params [2] = namedargs; mono_runtime_invoke (ctor, attr, params, NULL); return attr; } MonoArray* mono_custom_attrs_construct (MonoCustomAttrInfo *cinfo) { MonoArray *result; MonoObject *attr; int i; result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, cinfo->num_attrs); for (i = 0; i < cinfo->num_attrs; ++i) { if (!cinfo->attrs [i].ctor) /* The cattr type is not finished yet */ /* We should include the type name but cinfo doesn't contain it */ mono_raise_exception (mono_get_exception_type_load (NULL, NULL)); attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size); mono_array_setref (result, i, attr); } return result; } static MonoArray* mono_custom_attrs_construct_by_type (MonoCustomAttrInfo *cinfo, MonoClass *attr_klass) { MonoArray *result; MonoObject *attr; int i, n; n = 0; for (i = 0; i < cinfo->num_attrs; ++i) { if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass)) n ++; } result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, n); n = 0; for (i = 0; i < cinfo->num_attrs; ++i) { if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass)) { attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size); mono_array_setref (result, n, attr); n ++; } } return result; } static MonoArray* mono_custom_attrs_data_construct (MonoCustomAttrInfo *cinfo) { MonoArray *result; 
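	/* Unlike mono_custom_attrs_construct () above, this builds
	 * System.Reflection.CustomAttributeData objects: the constructor and named
	 * arguments are decoded, but the attribute type itself is never
	 * instantiated. */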
MonoObject *attr; int i; result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, cinfo->num_attrs); for (i = 0; i < cinfo->num_attrs; ++i) { attr = create_custom_attr_data (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size); mono_array_setref (result, i, attr); } return result; } /** * mono_custom_attrs_from_index: * * Returns: NULL if no attributes are found or if a loading error occurs. */ MonoCustomAttrInfo* mono_custom_attrs_from_index (MonoImage *image, guint32 idx) { guint32 mtoken, i, len; guint32 cols [MONO_CUSTOM_ATTR_SIZE]; MonoTableInfo *ca; MonoCustomAttrInfo *ainfo; GList *tmp, *list = NULL; const char *data; ca = &image->tables [MONO_TABLE_CUSTOMATTRIBUTE]; i = mono_metadata_custom_attrs_from_index (image, idx); if (!i) return NULL; i --; while (i < ca->rows) { if (mono_metadata_decode_row_col (ca, i, MONO_CUSTOM_ATTR_PARENT) != idx) break; list = g_list_prepend (list, GUINT_TO_POINTER (i)); ++i; } len = g_list_length (list); if (!len) return NULL; ainfo = g_malloc0 (MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * len); ainfo->num_attrs = len; ainfo->image = image; for (i = 0, tmp = list; i < len; ++i, tmp = tmp->next) { mono_metadata_decode_row (ca, GPOINTER_TO_UINT (tmp->data), cols, MONO_CUSTOM_ATTR_SIZE); mtoken = cols [MONO_CUSTOM_ATTR_TYPE] >> MONO_CUSTOM_ATTR_TYPE_BITS; switch (cols [MONO_CUSTOM_ATTR_TYPE] & MONO_CUSTOM_ATTR_TYPE_MASK) { case MONO_CUSTOM_ATTR_TYPE_METHODDEF: mtoken |= MONO_TOKEN_METHOD_DEF; break; case MONO_CUSTOM_ATTR_TYPE_MEMBERREF: mtoken |= MONO_TOKEN_MEMBER_REF; break; default: g_error ("Unknown table for custom attr type %08x", cols [MONO_CUSTOM_ATTR_TYPE]); break; } ainfo->attrs [i].ctor = mono_get_method (image, mtoken, NULL); if (!ainfo->attrs [i].ctor) { g_warning ("Can't find custom attr constructor image: %s mtoken: 0x%08x", image->name, mtoken); g_list_free (list); g_free (ainfo); return NULL; } data = mono_metadata_blob_heap (image, cols [MONO_CUSTOM_ATTR_VALUE]); ainfo->attrs [i].data_size = mono_metadata_decode_value (data, &data); ainfo->attrs [i].data = (guchar*)data; } g_list_free (list); return ainfo; } MonoCustomAttrInfo* mono_custom_attrs_from_method (MonoMethod *method) { guint32 idx; /* * An instantiated method has the same cattrs as the generic method definition. 
* * LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders * Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization */ if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; if (method->dynamic || method->klass->image->dynamic) return lookup_custom_attr (method->klass->image, method); if (!method->token) /* Synthetic methods */ return NULL; idx = mono_method_get_index (method); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_METHODDEF; return mono_custom_attrs_from_index (method->klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_class (MonoClass *klass) { guint32 idx; if (klass->generic_class) klass = klass->generic_class->container_class; if (klass->image->dynamic) return lookup_custom_attr (klass->image, klass); if (klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR) { idx = mono_metadata_token_index (klass->sizes.generic_param_token); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_GENERICPAR; } else { idx = mono_metadata_token_index (klass->type_token); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_TYPEDEF; } return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_assembly (MonoAssembly *assembly) { guint32 idx; if (assembly->image->dynamic) return lookup_custom_attr (assembly->image, assembly); idx = 1; /* there is only one assembly */ idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_ASSEMBLY; return mono_custom_attrs_from_index (assembly->image, idx); } static MonoCustomAttrInfo* mono_custom_attrs_from_module (MonoImage *image) { guint32 idx; if (image->dynamic) return lookup_custom_attr (image, image); idx = 1; /* there is only one module */ idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_MODULE; return mono_custom_attrs_from_index (image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_property (MonoClass *klass, MonoProperty *property) { guint32 idx; if (klass->image->dynamic) { property = mono_metadata_get_corresponding_property_from_generic_type_definition (property); return lookup_custom_attr (klass->image, property); } idx = find_property_index (klass, property); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_PROPERTY; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_event (MonoClass *klass, MonoEvent *event) { guint32 idx; if (klass->image->dynamic) { event = mono_metadata_get_corresponding_event_from_generic_type_definition (event); return lookup_custom_attr (klass->image, event); } idx = find_event_index (klass, event); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_EVENT; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_field (MonoClass *klass, MonoClassField *field) { guint32 idx; if (klass->image->dynamic) { field = mono_metadata_get_corresponding_field_from_generic_type_definition (field); return lookup_custom_attr (klass->image, field); } idx = find_field_index (klass, field); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_FIELDDEF; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_param (MonoMethod *method, guint32 param) { MonoTableInfo *ca; guint32 i, idx, method_index; guint32 param_list, param_last, param_pos, found; MonoImage *image; MonoReflectionMethodAux *aux; /* * An instantiated method has the same cattrs as the generic method definition. 
* * LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders * Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization */ if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; if (method->klass->image->dynamic) { MonoCustomAttrInfo *res, *ainfo; int size; aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method); if (!aux || !aux->param_cattr) return NULL; /* Need to copy since it will be freed later */ ainfo = aux->param_cattr [param]; if (!ainfo) return NULL; size = MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * ainfo->num_attrs; res = g_malloc0 (size); memcpy (res, ainfo, size); return res; } image = method->klass->image; method_index = mono_method_get_index (method); ca = &image->tables [MONO_TABLE_METHOD]; param_list = mono_metadata_decode_row_col (ca, method_index - 1, MONO_METHOD_PARAMLIST); if (method_index == ca->rows) { ca = &image->tables [MONO_TABLE_PARAM]; param_last = ca->rows + 1; } else { param_last = mono_metadata_decode_row_col (ca, method_index, MONO_METHOD_PARAMLIST); ca = &image->tables [MONO_TABLE_PARAM]; } found = FALSE; for (i = param_list; i < param_last; ++i) { param_pos = mono_metadata_decode_row_col (ca, i - 1, MONO_PARAM_SEQUENCE); if (param_pos == param) { found = TRUE; break; } } if (!found) return NULL; idx = i; idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_PARAMDEF; return mono_custom_attrs_from_index (image, idx); } gboolean mono_custom_attrs_has_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass) { int i; MonoClass *klass; for (i = 0; i < ainfo->num_attrs; ++i) { klass = ainfo->attrs [i].ctor->klass; if (mono_class_has_parent (klass, attr_klass) || (MONO_CLASS_IS_INTERFACE (attr_klass) && mono_class_is_assignable_from (attr_klass, klass))) return TRUE; } return FALSE; } MonoObject* mono_custom_attrs_get_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass) { int i, attr_index; MonoClass *klass; MonoArray *attrs; attr_index = -1; for (i = 0; i < ainfo->num_attrs; ++i) { klass = ainfo->attrs [i].ctor->klass; if (mono_class_has_parent (klass, attr_klass)) { attr_index = i; break; } } if (attr_index == -1) return NULL; attrs = mono_custom_attrs_construct (ainfo); if (attrs) return mono_array_get (attrs, MonoObject*, attr_index); else return NULL; } /* * mono_reflection_get_custom_attrs_info: * @obj: a reflection object handle * * Return the custom attribute info for attributes defined for the * reflection handle @obj. The objects. * * FIXME this function leaks like a sieve for SRE objects. 
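 *
 * Typical usage (a sketch; it mirrors mono_reflection_get_custom_attrs_by_type
 * below):
 *
 *   MonoCustomAttrInfo *cinfo = mono_reflection_get_custom_attrs_info (obj);
 *   if (cinfo) {
 *           MonoArray *attrs = mono_custom_attrs_construct (cinfo);
 *           if (!cinfo->cached)
 *                   mono_custom_attrs_free (cinfo);
 *   }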
*/ MonoCustomAttrInfo* mono_reflection_get_custom_attrs_info (MonoObject *obj) { MonoClass *klass; MonoCustomAttrInfo *cinfo = NULL; klass = obj->vtable->klass; if (klass == mono_defaults.monotype_class) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); klass = mono_class_from_mono_type (type); cinfo = mono_custom_attrs_from_class (klass); } else if (strcmp ("Assembly", klass->name) == 0) { MonoReflectionAssembly *rassembly = (MonoReflectionAssembly*)obj; cinfo = mono_custom_attrs_from_assembly (rassembly->assembly); } else if (strcmp ("Module", klass->name) == 0) { MonoReflectionModule *module = (MonoReflectionModule*)obj; cinfo = mono_custom_attrs_from_module (module->image); } else if (strcmp ("MonoProperty", klass->name) == 0) { MonoReflectionProperty *rprop = (MonoReflectionProperty*)obj; cinfo = mono_custom_attrs_from_property (rprop->property->parent, rprop->property); } else if (strcmp ("MonoEvent", klass->name) == 0) { MonoReflectionMonoEvent *revent = (MonoReflectionMonoEvent*)obj; cinfo = mono_custom_attrs_from_event (revent->event->parent, revent->event); } else if (strcmp ("MonoField", klass->name) == 0) { MonoReflectionField *rfield = (MonoReflectionField*)obj; cinfo = mono_custom_attrs_from_field (rfield->field->parent, rfield->field); } else if ((strcmp ("MonoMethod", klass->name) == 0) || (strcmp ("MonoCMethod", klass->name) == 0)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj; cinfo = mono_custom_attrs_from_method (rmethod->method); } else if ((strcmp ("MonoGenericMethod", klass->name) == 0) || (strcmp ("MonoGenericCMethod", klass->name) == 0)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj; cinfo = mono_custom_attrs_from_method (rmethod->method); } else if (strcmp ("ParameterInfo", klass->name) == 0) { MonoReflectionParameter *param = (MonoReflectionParameter*)obj; MonoClass *member_class = mono_object_class (param->MemberImpl); if (mono_class_is_reflection_method_or_constructor (member_class)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)param->MemberImpl; cinfo = mono_custom_attrs_from_param (rmethod->method, param->PositionImpl + 1); } else if (is_sr_mono_property (member_class)) { MonoReflectionProperty *prop = (MonoReflectionProperty *)param->MemberImpl; MonoMethod *method; if (!(method = prop->property->get)) method = prop->property->set; g_assert (method); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } else if (is_sre_method_on_tb_inst (member_class)) {/*XXX This is a workaround for Compiler Context*/ MonoMethod *method = mono_reflection_method_on_tb_inst_get_handle ((MonoReflectionMethodOnTypeBuilderInst*)param->MemberImpl); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } else if (is_sre_ctor_on_tb_inst (member_class)) { /*XX This is a workaround for Compiler Context*/ MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)param->MemberImpl; MonoMethod *method = NULL; if (is_sre_ctor_builder (mono_object_class (c->cb))) method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle; else if (is_sr_mono_cmethod (mono_object_class (c->cb))) method = ((MonoReflectionMethod *)c->cb)->method; else g_error ("mono_reflection_get_custom_attrs_info:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (member_class)); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } else { char *type_name = mono_type_get_full_name (member_class); char *msg = g_strdup_printf ("Custom attributes on a 
ParamInfo with member %s are not supported", type_name); MonoException *ex = mono_get_exception_not_supported (msg); g_free (type_name); g_free (msg); mono_raise_exception (ex); } } else if (strcmp ("AssemblyBuilder", klass->name) == 0) { MonoReflectionAssemblyBuilder *assemblyb = (MonoReflectionAssemblyBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, assemblyb->assembly.assembly->image, assemblyb->cattrs); } else if (strcmp ("TypeBuilder", klass->name) == 0) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &tb->module->dynamic_image->image, tb->cattrs); } else if (strcmp ("ModuleBuilder", klass->name) == 0) { MonoReflectionModuleBuilder *mb = (MonoReflectionModuleBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &mb->dynamic_image->image, mb->cattrs); } else if (strcmp ("ConstructorBuilder", klass->name) == 0) { MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, cb->mhandle->klass->image, cb->cattrs); } else if (strcmp ("MethodBuilder", klass->name) == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, mb->mhandle->klass->image, mb->cattrs); } else if (strcmp ("FieldBuilder", klass->name) == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image->image, fb->cattrs); } else if (strcmp ("MonoGenericClass", klass->name) == 0) { MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)obj; cinfo = mono_reflection_get_custom_attrs_info ((MonoObject*)gclass->generic_type); } else { /* handle other types here... */ g_error ("get custom attrs not yet supported for %s", klass->name); } return cinfo; } /* * mono_reflection_get_custom_attrs_by_type: * @obj: a reflection object handle * * Return an array with all the custom attributes defined of the * reflection handle @obj. If @attr_klass is non-NULL, only custom attributes * of that type are returned. The objects are fully build. Return NULL if a loading error * occurs. */ MonoArray* mono_reflection_get_custom_attrs_by_type (MonoObject *obj, MonoClass *attr_klass) { MonoArray *result; MonoCustomAttrInfo *cinfo; cinfo = mono_reflection_get_custom_attrs_info (obj); if (cinfo) { if (attr_klass) result = mono_custom_attrs_construct_by_type (cinfo, attr_klass); else result = mono_custom_attrs_construct (cinfo); if (!cinfo->cached) mono_custom_attrs_free (cinfo); } else { if (mono_loader_get_last_error ()) return NULL; result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, 0); } return result; } /* * mono_reflection_get_custom_attrs: * @obj: a reflection object handle * * Return an array with all the custom attributes defined of the * reflection handle @obj. The objects are fully build. Return NULL if a loading error * occurs. 
*/ MonoArray* mono_reflection_get_custom_attrs (MonoObject *obj) { return mono_reflection_get_custom_attrs_by_type (obj, NULL); } /* * mono_reflection_get_custom_attrs_data: * @obj: a reflection obj handle * * Returns an array of System.Reflection.CustomAttributeData, * which include information about attributes reflected on * types loaded using the Reflection Only methods */ MonoArray* mono_reflection_get_custom_attrs_data (MonoObject *obj) { MonoArray *result; MonoCustomAttrInfo *cinfo; cinfo = mono_reflection_get_custom_attrs_info (obj); if (cinfo) { result = mono_custom_attrs_data_construct (cinfo); if (!cinfo->cached) mono_custom_attrs_free (cinfo); } else result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, 0); return result; } static MonoReflectionType* mono_reflection_type_get_underlying_system_type (MonoReflectionType* t) { MonoMethod *method_get_underlying_system_type; method_get_underlying_system_type = mono_object_get_virtual_method ((MonoObject *) t, mono_class_get_method_from_name (mono_object_class (t), "get_UnderlyingSystemType", 0)); return (MonoReflectionType *) mono_runtime_invoke (method_get_underlying_system_type, t, NULL, NULL); } #ifndef DISABLE_REFLECTION_EMIT static gboolean is_corlib_type (MonoClass *class) { return class->image == mono_defaults.corlib; } static gboolean is_usertype (MonoReflectionType *ref) { MonoClass *class = mono_object_class (ref); return class->image != mono_defaults.corlib || strcmp ("TypeDelegator", class->name) == 0; } #define check_corlib_type_cached(_class, _namespace, _name) do { \ static MonoClass *cached_class; \ if (cached_class) \ return cached_class == _class; \ if (is_corlib_type (_class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \ cached_class = _class; \ return TRUE; \ } \ return FALSE; \ } while (0) \ static gboolean is_sre_array (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ArrayType"); } static gboolean is_sre_byref (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ByRefType"); } static gboolean is_sre_pointer (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "PointerType"); } static gboolean is_sre_generic_instance (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoGenericClass"); } static gboolean is_sre_method_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "MethodBuilder"); } static gboolean is_sre_ctor_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorBuilder"); } static gboolean is_sr_mono_method (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoMethod"); } static gboolean is_sr_mono_cmethod (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoCMethod"); } static gboolean is_sr_mono_generic_method (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoGenericMethod"); } static gboolean is_sr_mono_generic_cmethod (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoGenericCMethod"); } static gboolean is_sr_mono_property (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoProperty"); } static gboolean is_sre_method_on_tb_inst (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "MethodOnTypeBuilderInst"); } static gboolean is_sre_ctor_on_tb_inst (MonoClass *class) { 
check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorOnTypeBuilderInst"); } gboolean mono_class_is_reflection_method_or_constructor (MonoClass *class) { return is_sr_mono_method (class) || is_sr_mono_cmethod (class) || is_sr_mono_generic_method (class) || is_sr_mono_generic_cmethod (class); } MonoType* mono_reflection_type_get_handle (MonoReflectionType* ref) { MonoClass *class; if (!ref) return NULL; if (ref->type) return ref->type; if (is_usertype (ref)) { ref = mono_reflection_type_get_underlying_system_type (ref); g_assert (!is_usertype (ref)); /*FIXME fail better*/ if (ref->type) return ref->type; } class = mono_object_class (ref); if (is_sre_array (class)) { MonoType *res; MonoReflectionArrayType *sre_array = (MonoReflectionArrayType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_array->element_type); g_assert (base); if (sre_array->rank == 0) //single dimentional array res = &mono_array_class_get (mono_class_from_mono_type (base), 1)->byval_arg; else res = &mono_bounded_array_class_get (mono_class_from_mono_type (base), sre_array->rank, TRUE)->byval_arg; sre_array->type.type = res; return res; } else if (is_sre_byref (class)) { MonoType *res; MonoReflectionDerivedType *sre_byref = (MonoReflectionDerivedType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_byref->element_type); g_assert (base); res = &mono_class_from_mono_type (base)->this_arg; sre_byref->type.type = res; return res; } else if (is_sre_pointer (class)) { MonoType *res; MonoReflectionDerivedType *sre_pointer = (MonoReflectionDerivedType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_pointer->element_type); g_assert (base); res = &mono_ptr_class_get (base)->byval_arg; sre_pointer->type.type = res; return res; } else if (is_sre_generic_instance (class)) { MonoType *res, **types; MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)ref; int i, count; count = mono_array_length (gclass->type_arguments); types = g_new0 (MonoType*, count); for (i = 0; i < count; ++i) { MonoReflectionType *t = mono_array_get (gclass->type_arguments, gpointer, i); types [i] = mono_reflection_type_get_handle (t); } res = mono_reflection_bind_generic_parameters ((MonoReflectionType*)gclass->generic_type, count, types); g_free (types); g_assert (res); gclass->type.type = res; return res; } g_error ("Cannot handle corlib user type %s", mono_type_full_name (&mono_object_class(ref)->byval_arg)); return NULL; } static MonoReflectionType* mono_reflection_type_resolve_user_types (MonoReflectionType *type) { if (!type || type->type) return type; if (is_usertype (type)) { type = mono_reflection_type_get_underlying_system_type (type); if (is_usertype (type)) mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported22")); } return type; } void mono_reflection_create_unmanaged_type (MonoReflectionType *type) { mono_reflection_type_get_handle (type); } /** * LOCKING: Assumes the loader lock is held. */ static MonoMethodSignature* parameters_to_signature (MonoImage *image, MonoArray *parameters) { MonoMethodSignature *sig; int count, i; count = parameters? mono_array_length (parameters): 0; sig = image_g_malloc0 (image, MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * count); sig->param_count = count; sig->sentinelpos = -1; /* FIXME */ for (i = 0; i < count; ++i) sig->params [i] = mono_type_array_get_and_resolve (parameters, i); return sig; } /** * LOCKING: Assumes the loader lock is held. 
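 * A minimal usage sketch, mirroring how ctorbuilder_to_mono_method below calls it:
 *   mono_loader_lock ();
 *   sig = ctor_builder_to_signature (klass->image, mb);
 *   mono_loader_unlock ();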
*/ static MonoMethodSignature* ctor_builder_to_signature (MonoImage *image, MonoReflectionCtorBuilder *ctor) { MonoMethodSignature *sig; sig = parameters_to_signature (image, ctor->parameters); sig->hasthis = ctor->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1; sig->ret = &mono_defaults.void_class->byval_arg; return sig; } /** * LOCKING: Assumes the loader lock is held. */ static MonoMethodSignature* method_builder_to_signature (MonoImage *image, MonoReflectionMethodBuilder *method) { MonoMethodSignature *sig; sig = parameters_to_signature (image, method->parameters); sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1; sig->ret = method->rtype? mono_reflection_type_get_handle ((MonoReflectionType*)method->rtype): &mono_defaults.void_class->byval_arg; sig->generic_param_count = method->generic_params ? mono_array_length (method->generic_params) : 0; return sig; } static MonoMethodSignature* dynamic_method_to_signature (MonoReflectionDynamicMethod *method) { MonoMethodSignature *sig; sig = parameters_to_signature (NULL, method->parameters); sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1; sig->ret = method->rtype? mono_reflection_type_get_handle (method->rtype): &mono_defaults.void_class->byval_arg; sig->generic_param_count = 0; return sig; } static void get_prop_name_and_type (MonoObject *prop, char **name, MonoType **type) { MonoClass *klass = mono_object_class (prop); if (strcmp (klass->name, "PropertyBuilder") == 0) { MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *)prop; *name = mono_string_to_utf8 (pb->name); *type = mono_reflection_type_get_handle ((MonoReflectionType*)pb->type); } else { MonoReflectionProperty *p = (MonoReflectionProperty *)prop; *name = g_strdup (p->property->name); if (p->property->get) *type = mono_method_signature (p->property->get)->ret; else *type = mono_method_signature (p->property->set)->params [mono_method_signature (p->property->set)->param_count - 1]; } } static void get_field_name_and_type (MonoObject *field, char **name, MonoType **type) { MonoClass *klass = mono_object_class (field); if (strcmp (klass->name, "FieldBuilder") == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)field; *name = mono_string_to_utf8 (fb->name); *type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); } else { MonoReflectionField *f = (MonoReflectionField *)field; *name = g_strdup (mono_field_get_name (f->field)); *type = f->field->type; } } #endif /* !DISABLE_REFLECTION_EMIT */ /* * Encode a value in a custom attribute stream of bytes. * The value to encode is either supplied as an object in argument val * (valuetypes are boxed), or as a pointer to the data in the * argument argval. * @type represents the type of the value * @buffer is the start of the buffer * @p the current position in the buffer * @buflen contains the size of the buffer and is used to return the new buffer size * if this needs to be realloced. 
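 * Because the buffer can be moved by that realloc, a caller continues with the
 * returned buffer/position values, e.g. (illustrative sketch):
 *   encode_cattr_value (assembly, buffer, p, &buffer, &p, &buflen, type, arg, NULL);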
* @retbuffer and @retp return the start and the position of the buffer */ static void encode_cattr_value (MonoAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, MonoObject *arg, char *argval) { MonoTypeEnum simple_type; if ((p-buffer) + 10 >= *buflen) { char *newbuf; *buflen *= 2; newbuf = g_realloc (buffer, *buflen); p = newbuf + (p-buffer); buffer = newbuf; } if (!argval) argval = ((char*)arg + sizeof (MonoObject)); simple_type = type->type; handle_enum: switch (simple_type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: case MONO_TYPE_I1: *p++ = *argval; break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: swap_with_size (p, argval, 2, 1); p += 2; break; case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: swap_with_size (p, argval, 4, 1); p += 4; break; case MONO_TYPE_R8: #if defined(ARM_FPU_FPA) && G_BYTE_ORDER == G_LITTLE_ENDIAN p [0] = argval [4]; p [1] = argval [5]; p [2] = argval [6]; p [3] = argval [7]; p [4] = argval [0]; p [5] = argval [1]; p [6] = argval [2]; p [7] = argval [3]; #else swap_with_size (p, argval, 8, 1); #endif p += 8; break; case MONO_TYPE_U8: case MONO_TYPE_I8: swap_with_size (p, argval, 8, 1); p += 8; break; case MONO_TYPE_VALUETYPE: if (type->data.klass->enumtype) { simple_type = mono_class_enum_basetype (type->data.klass)->type; goto handle_enum; } else { g_warning ("generic valutype %s not handled in custom attr value decoding", type->data.klass->name); } break; case MONO_TYPE_STRING: { char *str; guint32 slen; if (!arg) { *p++ = 0xFF; break; } str = mono_string_to_utf8 ((MonoString*)arg); slen = strlen (str); if ((p-buffer) + 10 + slen >= *buflen) { char *newbuf; *buflen *= 2; *buflen += slen; newbuf = g_realloc (buffer, *buflen); p = newbuf + (p-buffer); buffer = newbuf; } mono_metadata_encode_value (slen, p, &p); memcpy (p, str, slen); p += slen; g_free (str); break; } case MONO_TYPE_CLASS: { char *str; guint32 slen; if (!arg) { *p++ = 0xFF; break; } handle_type: str = type_get_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)arg), NULL); slen = strlen (str); if ((p-buffer) + 10 + slen >= *buflen) { char *newbuf; *buflen *= 2; *buflen += slen; newbuf = g_realloc (buffer, *buflen); p = newbuf + (p-buffer); buffer = newbuf; } mono_metadata_encode_value (slen, p, &p); memcpy (p, str, slen); p += slen; g_free (str); break; } case MONO_TYPE_SZARRAY: { int len, i; MonoClass *eclass, *arg_eclass; if (!arg) { *p++ = 0xff; *p++ = 0xff; *p++ = 0xff; *p++ = 0xff; break; } len = mono_array_length ((MonoArray*)arg); *p++ = len & 0xff; *p++ = (len >> 8) & 0xff; *p++ = (len >> 16) & 0xff; *p++ = (len >> 24) & 0xff; *retp = p; *retbuffer = buffer; eclass = type->data.klass; arg_eclass = mono_object_class (arg)->element_class; if (!eclass) { /* Happens when we are called from the MONO_TYPE_OBJECT case below */ eclass = mono_defaults.object_class; } if (eclass == mono_defaults.object_class && arg_eclass->valuetype) { char *elptr = mono_array_addr ((MonoArray*)arg, char, 0); int elsize = mono_class_array_element_size (arg_eclass); for (i = 0; i < len; ++i) { encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &arg_eclass->byval_arg, NULL, elptr); elptr += elsize; } } else if (eclass->valuetype && arg_eclass->valuetype) { char *elptr = mono_array_addr ((MonoArray*)arg, char, 0); int elsize = mono_class_array_element_size (eclass); for (i = 0; i < len; ++i) { encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, NULL, elptr); elptr += elsize; } } 
else { for (i = 0; i < len; ++i) { encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, mono_array_get ((MonoArray*)arg, MonoObject*, i), NULL); } } break; } case MONO_TYPE_OBJECT: { MonoClass *klass; char *str; guint32 slen; /* * The parameter type is 'object' but the type of the actual * argument is not. So we have to add type information to the blob * too. This is completely undocumented in the spec. */ if (arg == NULL) { *p++ = MONO_TYPE_STRING; // It's same hack as MS uses *p++ = 0xFF; break; } klass = mono_object_class (arg); if (mono_object_isinst (arg, mono_defaults.systemtype_class)) { *p++ = 0x50; goto handle_type; } else if (klass->enumtype) { *p++ = 0x55; } else if (klass == mono_defaults.string_class) { simple_type = MONO_TYPE_STRING; *p++ = 0x0E; goto handle_enum; } else if (klass->rank == 1) { *p++ = 0x1D; if (klass->element_class->byval_arg.type == MONO_TYPE_OBJECT) /* See Partition II, Appendix B3 */ *p++ = 0x51; else *p++ = klass->element_class->byval_arg.type; encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &klass->byval_arg, arg, NULL); break; } else if (klass->byval_arg.type >= MONO_TYPE_BOOLEAN && klass->byval_arg.type <= MONO_TYPE_R8) { *p++ = simple_type = klass->byval_arg.type; goto handle_enum; } else { g_error ("unhandled type in custom attr"); } str = type_get_qualified_name (mono_class_get_type(klass), NULL); slen = strlen (str); if ((p-buffer) + 10 + slen >= *buflen) { char *newbuf; *buflen *= 2; *buflen += slen; newbuf = g_realloc (buffer, *buflen); p = newbuf + (p-buffer); buffer = newbuf; } mono_metadata_encode_value (slen, p, &p); memcpy (p, str, slen); p += slen; g_free (str); simple_type = mono_class_enum_basetype (klass)->type; goto handle_enum; } default: g_error ("type 0x%02x not yet supported in custom attr encoder", simple_type); } *retp = p; *retbuffer = buffer; } static void encode_field_or_prop_type (MonoType *type, char *p, char **retp) { if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) { char *str = type_get_qualified_name (type, NULL); int slen = strlen (str); *p++ = 0x55; /* * This seems to be optional... 
* *p++ = 0x80; */ mono_metadata_encode_value (slen, p, &p); memcpy (p, str, slen); p += slen; g_free (str); } else if (type->type == MONO_TYPE_OBJECT) { *p++ = 0x51; } else if (type->type == MONO_TYPE_CLASS) { /* it should be a type: encode_cattr_value () has the check */ *p++ = 0x50; } else { mono_metadata_encode_value (type->type, p, &p); if (type->type == MONO_TYPE_SZARRAY) /* See the examples in Partition VI, Annex B */ encode_field_or_prop_type (&type->data.klass->byval_arg, p, &p); } *retp = p; } #ifndef DISABLE_REFLECTION_EMIT static void encode_named_val (MonoReflectionAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, char *name, MonoObject *value) { int len; /* Preallocate a large enough buffer */ if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) { char *str = type_get_qualified_name (type, NULL); len = strlen (str); g_free (str); } else if (type->type == MONO_TYPE_SZARRAY && type->data.klass->enumtype) { char *str = type_get_qualified_name (&type->data.klass->byval_arg, NULL); len = strlen (str); g_free (str); } else { len = 0; } len += strlen (name); if ((p-buffer) + 20 + len >= *buflen) { char *newbuf; *buflen *= 2; *buflen += len; newbuf = g_realloc (buffer, *buflen); p = newbuf + (p-buffer); buffer = newbuf; } encode_field_or_prop_type (type, p, &p); len = strlen (name); mono_metadata_encode_value (len, p, &p); memcpy (p, name, len); p += len; encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, buflen, type, value, NULL); *retp = p; *retbuffer = buffer; } /* * mono_reflection_get_custom_attrs_blob: * @ctor: custom attribute constructor * @ctorArgs: arguments to the constructor * @properties: * @propValues: * @fields: * @fieldValues: * * Creates the blob of data that needs to be saved in the metadata and that represents * the custom attribute described by @ctor, @ctorArgs etc. * Returns: a Byte array representing the blob of data.
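 * The blob follows the ECMA-335 custom attribute encoding: a 0x0001 prolog, the
 * fixed constructor arguments in signature order, a 16-bit count of named
 * arguments, then one 0x54 (PROPERTY) or 0x53 (FIELD) entry per named value.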
*/ MonoArray* mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues) { MonoArray *result; MonoMethodSignature *sig; MonoObject *arg; char *buffer, *p; guint32 buflen, i; MONO_ARCH_SAVE_REGS; if (strcmp (ctor->vtable->klass->name, "MonoCMethod")) { /* sig is freed later so allocate it in the heap */ sig = ctor_builder_to_signature (NULL, (MonoReflectionCtorBuilder*)ctor); } else { sig = mono_method_signature (((MonoReflectionMethod*)ctor)->method); } g_assert (mono_array_length (ctorArgs) == sig->param_count); buflen = 256; p = buffer = g_malloc (buflen); /* write the prolog */ *p++ = 1; *p++ = 0; for (i = 0; i < sig->param_count; ++i) { arg = mono_array_get (ctorArgs, MonoObject*, i); encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, &buflen, sig->params [i], arg, NULL); } i = 0; if (properties) i += mono_array_length (properties); if (fields) i += mono_array_length (fields); *p++ = i & 0xff; *p++ = (i >> 8) & 0xff; if (properties) { MonoObject *prop; for (i = 0; i < mono_array_length (properties); ++i) { MonoType *ptype; char *pname; prop = mono_array_get (properties, gpointer, i); get_prop_name_and_type (prop, &pname, &ptype); *p++ = 0x54; /* PROPERTY signature */ encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ptype, pname, (MonoObject*)mono_array_get (propValues, gpointer, i)); g_free (pname); } } if (fields) { MonoObject *field; for (i = 0; i < mono_array_length (fields); ++i) { MonoType *ftype; char *fname; field = mono_array_get (fields, gpointer, i); get_field_name_and_type (field, &fname, &ftype); *p++ = 0x53; /* FIELD signature */ encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ftype, fname, (MonoObject*)mono_array_get (fieldValues, gpointer, i)); g_free (fname); } } g_assert (p - buffer <= buflen); buflen = p - buffer; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen); p = mono_array_addr (result, char, 0); memcpy (p, buffer, buflen); g_free (buffer); if (strcmp (ctor->vtable->klass->name, "MonoCMethod")) g_free (sig); return result; } /* * mono_reflection_setup_internal_class: * @tb: a TypeBuilder object * * Creates a MonoClass that represents the TypeBuilder. * This is a trick that lets us simplify a lot of reflection code * (and will allow us to support Build and Run assemblies easier). 
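 * The class created here is registered in the image name cache and in the dynamic
 * image token table, so mono_class_get () can already resolve it; when the type
 * was created earlier, only its parent is set up again.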
*/ void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb) { MonoError error; MonoClass *klass, *parent; MONO_ARCH_SAVE_REGS; RESOLVE_TYPE (tb->parent); mono_loader_lock (); if (tb->parent) { /* check so we can compile corlib correctly */ if (strcmp (mono_object_class (tb->parent)->name, "TypeBuilder") == 0) { /* mono_class_setup_mono_type () guaranteess type->data.klass is valid */ parent = mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)->data.klass; } else { parent = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)); } } else { parent = NULL; } /* the type has already being created: it means we just have to change the parent */ if (tb->type.type) { klass = mono_class_from_mono_type (tb->type.type); klass->parent = NULL; /* fool mono_class_setup_parent */ klass->supertypes = NULL; mono_class_setup_parent (klass, parent); mono_class_setup_mono_type (klass); mono_loader_unlock (); return; } klass = mono_image_alloc0 (&tb->module->dynamic_image->image, sizeof (MonoClass)); klass->image = &tb->module->dynamic_image->image; klass->inited = 1; /* we lie to the runtime */ klass->name = mono_string_to_utf8_image (klass->image, tb->name, &error); if (!mono_error_ok (&error)) goto failure; klass->name_space = mono_string_to_utf8_image (klass->image, tb->nspace, &error); if (!mono_error_ok (&error)) goto failure; klass->type_token = MONO_TOKEN_TYPE_DEF | tb->table_idx; klass->flags = tb->attrs; mono_profiler_class_event (klass, MONO_PROFILE_START_LOAD); klass->element_class = klass; MOVING_GC_REGISTER (&klass->reflection_info); klass->reflection_info = tb; /* Put into cache so mono_class_get () will find it */ mono_image_add_to_name_cache (klass->image, klass->name_space, klass->name, tb->table_idx); mono_g_hash_table_insert (tb->module->dynamic_image->tokens, GUINT_TO_POINTER (MONO_TOKEN_TYPE_DEF | tb->table_idx), tb); if (parent != NULL) { mono_class_setup_parent (klass, parent); } else if (strcmp (klass->name, "Object") == 0 && strcmp (klass->name_space, "System") == 0) { const char *old_n = klass->name; /* trick to get relative numbering right when compiling corlib */ klass->name = "BuildingObject"; mono_class_setup_parent (klass, mono_defaults.object_class); klass->name = old_n; } if ((!strcmp (klass->name, "ValueType") && !strcmp (klass->name_space, "System")) || (!strcmp (klass->name, "Object") && !strcmp (klass->name_space, "System")) || (!strcmp (klass->name, "Enum") && !strcmp (klass->name_space, "System"))) { klass->instance_size = sizeof (MonoObject); klass->size_inited = 1; mono_class_setup_vtable_general (klass, NULL, 0); } mono_class_setup_mono_type (klass); mono_class_setup_supertypes (klass); /* * FIXME: handle interfaces. */ tb->type.type = &klass->byval_arg; if (tb->nesting_type) { g_assert (tb->nesting_type->type); klass->nested_in = mono_class_from_mono_type (mono_reflection_type_get_handle (tb->nesting_type)); } /*g_print ("setup %s as %s (%p)\n", klass->name, ((MonoObject*)tb)->vtable->klass->name, tb);*/ mono_profiler_class_loaded (klass, MONO_PROFILE_OK); mono_loader_unlock (); return; failure: mono_loader_unlock (); mono_error_raise_exception (&error); } /* * mono_reflection_setup_generic_class: * @tb: a TypeBuilder object * * Setup the generic class before adding the first generic parameter. 
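 * This is currently a no-op: the generic container is only created once all the
 * generic parameters are known, in mono_reflection_create_generic_class () below.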
*/ void mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb) { } /* * mono_reflection_create_generic_class: * @tb: a TypeBuilder object * * Creates the generic class after all generic parameters have been added. */ void mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb) { MonoClass *klass; int count, i; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (tb->type.type); count = tb->generic_params ? mono_array_length (tb->generic_params) : 0; if (klass->generic_container || (count == 0)) return; g_assert (tb->generic_container && (tb->generic_container->owner.klass == klass)); klass->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer)); klass->generic_container->owner.klass = klass; klass->generic_container->type_argc = count; klass->generic_container->type_params = mono_image_alloc0 (klass->image, sizeof (MonoGenericParamFull) * count); klass->is_generic = 1; for (i = 0; i < count; i++) { MonoReflectionGenericParam *gparam = mono_array_get (tb->generic_params, gpointer, i); MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gparam)->data.generic_param; klass->generic_container->type_params [i] = *param; /*Make sure we are a diferent type instance */ klass->generic_container->type_params [i].param.owner = klass->generic_container; klass->generic_container->type_params [i].info.pklass = NULL; klass->generic_container->type_params [i].info.flags = gparam->attrs; g_assert (klass->generic_container->type_params [i].param.owner); } klass->generic_container->context.class_inst = mono_get_shared_generic_inst (klass->generic_container); } /* * mono_reflection_create_internal_class: * @tb: a TypeBuilder object * * Actually create the MonoClass that is associated with the TypeBuilder. */ void mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb) { MonoClass *klass; MONO_ARCH_SAVE_REGS; klass = mono_class_from_mono_type (tb->type.type); mono_loader_lock (); if (klass->enumtype && mono_class_enum_basetype (klass) == NULL) { MonoReflectionFieldBuilder *fb; MonoClass *ec; MonoType *enum_basetype; g_assert (tb->fields != NULL); g_assert (mono_array_length (tb->fields) >= 1); fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, 0); if (!mono_type_is_valid_enum_basetype (mono_reflection_type_get_handle ((MonoReflectionType*)fb->type))) { mono_loader_unlock (); return; } enum_basetype = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); klass->element_class = mono_class_from_mono_type (enum_basetype); if (!klass->element_class) klass->element_class = mono_class_from_mono_type (enum_basetype); /* * get the element_class from the current corlib. */ ec = default_class_from_mono_type (enum_basetype); klass->instance_size = ec->instance_size; klass->size_inited = 1; /* * this is almost safe to do with enums and it's needed to be able * to create objects of the enum type (for use in SetConstant). */ /* FIXME: Does this mean enums can't have method overrides ? 
*/ mono_class_setup_vtable_general (klass, NULL, 0); } mono_loader_unlock (); } static MonoMarshalSpec* mono_marshal_spec_from_builder (MonoImage *image, MonoAssembly *assembly, MonoReflectionMarshal *minfo) { MonoMarshalSpec *res; res = image_g_new0 (image, MonoMarshalSpec, 1); res->native = minfo->type; switch (minfo->type) { case MONO_NATIVE_LPARRAY: res->data.array_data.elem_type = minfo->eltype; if (minfo->has_size) { res->data.array_data.param_num = minfo->param_num; res->data.array_data.num_elem = minfo->count; res->data.array_data.elem_mult = minfo->param_num == -1 ? 0 : 1; } else { res->data.array_data.param_num = -1; res->data.array_data.num_elem = -1; res->data.array_data.elem_mult = -1; } break; case MONO_NATIVE_BYVALTSTR: case MONO_NATIVE_BYVALARRAY: res->data.array_data.num_elem = minfo->count; break; case MONO_NATIVE_CUSTOM: if (minfo->marshaltyperef) res->data.custom_data.custom_name = type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef)); if (minfo->mcookie) res->data.custom_data.cookie = mono_string_to_utf8 (minfo->mcookie); break; default: break; } return res; } #endif /* !DISABLE_REFLECTION_EMIT */ MonoReflectionMarshal* mono_reflection_marshal_from_marshal_spec (MonoDomain *domain, MonoClass *klass, MonoMarshalSpec *spec) { static MonoClass *System_Reflection_Emit_UnmanagedMarshalClass; MonoReflectionMarshal *minfo; MonoType *mtype; if (!System_Reflection_Emit_UnmanagedMarshalClass) { System_Reflection_Emit_UnmanagedMarshalClass = mono_class_from_name ( mono_defaults.corlib, "System.Reflection.Emit", "UnmanagedMarshal"); g_assert (System_Reflection_Emit_UnmanagedMarshalClass); } minfo = (MonoReflectionMarshal*)mono_object_new (domain, System_Reflection_Emit_UnmanagedMarshalClass); minfo->type = spec->native; switch (minfo->type) { case MONO_NATIVE_LPARRAY: minfo->eltype = spec->data.array_data.elem_type; minfo->count = spec->data.array_data.num_elem; minfo->param_num = spec->data.array_data.param_num; break; case MONO_NATIVE_BYVALTSTR: case MONO_NATIVE_BYVALARRAY: minfo->count = spec->data.array_data.num_elem; break; case MONO_NATIVE_CUSTOM: if (spec->data.custom_data.custom_name) { mtype = mono_reflection_type_from_name (spec->data.custom_data.custom_name, klass->image); if (mtype) MONO_OBJECT_SETREF (minfo, marshaltyperef, mono_type_get_object (domain, mtype)); MONO_OBJECT_SETREF (minfo, marshaltype, mono_string_new (domain, spec->data.custom_data.custom_name)); } if (spec->data.custom_data.cookie) MONO_OBJECT_SETREF (minfo, mcookie, mono_string_new (domain, spec->data.custom_data.cookie)); break; default: break; } return minfo; } #ifndef DISABLE_REFLECTION_EMIT static MonoMethod* reflection_methodbuilder_to_mono_method (MonoClass *klass, ReflectionMethodBuilder *rmb, MonoMethodSignature *sig) { MonoError error; MonoMethod *m; MonoMethodNormal *pm; MonoMarshalSpec **specs; MonoReflectionMethodAux *method_aux; MonoImage *image; gboolean dynamic; int i; mono_error_init (&error); /* * Methods created using a MethodBuilder should have their memory allocated * inside the image mempool, while dynamic methods should have their memory * malloc'd. */ dynamic = rmb->refs != NULL; image = dynamic ? 
NULL : klass->image; if (!dynamic) g_assert (!klass->generic_class); mono_loader_lock (); if ((rmb->attrs & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (rmb->iattrs & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) m = (MonoMethod *)image_g_new0 (image, MonoMethodPInvoke, 1); else if (rmb->refs) m = (MonoMethod *)image_g_new0 (image, MonoMethodWrapper, 1); else m = (MonoMethod *)image_g_new0 (image, MonoMethodNormal, 1); pm = (MonoMethodNormal*)m; m->dynamic = dynamic; m->slot = -1; m->flags = rmb->attrs; m->iflags = rmb->iattrs; m->name = mono_string_to_utf8_image (image, rmb->name, &error); g_assert (mono_error_ok (&error)); m->klass = klass; m->signature = sig; m->skip_visibility = rmb->skip_visibility; if (rmb->table_idx) m->token = MONO_TOKEN_METHOD_DEF | (*rmb->table_idx); if (m->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { if (klass == mono_defaults.string_class && !strcmp (m->name, ".ctor")) m->string_ctor = 1; m->signature->pinvoke = 1; } else if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { m->signature->pinvoke = 1; method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1); method_aux->dllentry = rmb->dllentry ? mono_string_to_utf8_image (image, rmb->dllentry, &error) : image_strdup (image, m->name); g_assert (mono_error_ok (&error)); method_aux->dll = mono_string_to_utf8_image (image, rmb->dll, &error); g_assert (mono_error_ok (&error)); ((MonoMethodPInvoke*)m)->piflags = (rmb->native_cc << 8) | (rmb->charset ? (rmb->charset - 1) * 2 : 0) | rmb->extra_flags; if (klass->image->dynamic) g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux); mono_loader_unlock (); return m; } else if (!(m->flags & METHOD_ATTRIBUTE_ABSTRACT) && !(m->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) { MonoMethodHeader *header; guint32 code_size; gint32 max_stack, i; gint32 num_locals = 0; gint32 num_clauses = 0; guint8 *code; if (rmb->ilgen) { code = mono_array_addr (rmb->ilgen->code, guint8, 0); code_size = rmb->ilgen->code_len; max_stack = rmb->ilgen->max_stack; num_locals = rmb->ilgen->locals ? mono_array_length (rmb->ilgen->locals) : 0; if (rmb->ilgen->ex_handlers) num_clauses = method_count_clauses (rmb->ilgen); } else { if (rmb->code) { code = mono_array_addr (rmb->code, guint8, 0); code_size = mono_array_length (rmb->code); /* we probably need to run a verifier on the code... 
*/ max_stack = 8; } else { code = NULL; code_size = 0; max_stack = 8; } } header = image_g_malloc0 (image, MONO_SIZEOF_METHOD_HEADER + num_locals * sizeof (MonoType*)); header->code_size = code_size; header->code = image_g_malloc (image, code_size); memcpy ((char*)header->code, code, code_size); header->max_stack = max_stack; header->init_locals = rmb->init_locals; header->num_locals = num_locals; for (i = 0; i < num_locals; ++i) { MonoReflectionLocalBuilder *lb = mono_array_get (rmb->ilgen->locals, MonoReflectionLocalBuilder*, i); header->locals [i] = image_g_new0 (image, MonoType, 1); memcpy (header->locals [i], mono_reflection_type_get_handle ((MonoReflectionType*)lb->type), MONO_SIZEOF_TYPE); } header->num_clauses = num_clauses; if (num_clauses) { header->clauses = method_encode_clauses (image, (MonoDynamicImage*)klass->image, rmb->ilgen, num_clauses); } pm->header = header; } if (rmb->generic_params) { int count = mono_array_length (rmb->generic_params); MonoGenericContainer *container; container = rmb->generic_container; if (container) { m->is_generic = TRUE; mono_method_set_generic_container (m, container); } container->type_argc = count; container->type_params = image_g_new0 (image, MonoGenericParamFull, count); container->owner.method = m; for (i = 0; i < count; i++) { MonoReflectionGenericParam *gp = mono_array_get (rmb->generic_params, MonoReflectionGenericParam*, i); MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gp)->data.generic_param; container->type_params [i] = *param; } if (klass->generic_container) { container->parent = klass->generic_container; container->context.class_inst = klass->generic_container->context.class_inst; } container->context.method_inst = mono_get_shared_generic_inst (container); } if (rmb->refs) { MonoMethodWrapper *mw = (MonoMethodWrapper*)m; int i; void **data; m->wrapper_type = MONO_WRAPPER_DYNAMIC_METHOD; mw->method_data = data = image_g_new (image, gpointer, rmb->nrefs + 1); data [0] = GUINT_TO_POINTER (rmb->nrefs); for (i = 0; i < rmb->nrefs; ++i) data [i + 1] = rmb->refs [i]; } method_aux = NULL; /* Parameter info */ if (rmb->pinfo) { if (!method_aux) method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1); method_aux->param_names = image_g_new0 (image, char *, mono_method_signature (m)->param_count + 1); for (i = 0; i <= m->signature->param_count; ++i) { MonoReflectionParamBuilder *pb; if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) { if ((i > 0) && (pb->attrs)) { /* Make a copy since it might point to a shared type structure */ m->signature->params [i - 1] = mono_metadata_type_dup (klass->image, m->signature->params [i - 1]); m->signature->params [i - 1]->attrs = pb->attrs; } if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) { MonoDynamicImage *assembly; guint32 idx, def_type, len; char *p; const char *p2; if (!method_aux->param_defaults) { method_aux->param_defaults = image_g_new0 (image, guint8*, m->signature->param_count + 1); method_aux->param_default_types = image_g_new0 (image, guint32, m->signature->param_count + 1); } assembly = (MonoDynamicImage*)klass->image; idx = encode_constant (assembly, pb->def_value, &def_type); /* Copy the data from the blob since it might get realloc-ed */ p = assembly->blob.data + idx; len = mono_metadata_decode_blob_size (p, &p2); len += p2 - p; method_aux->param_defaults [i] = image_g_malloc (image, len); method_aux->param_default_types [i] = def_type; memcpy ((gpointer)method_aux->param_defaults [i], p, len); } if 
(pb->name) { method_aux->param_names [i] = mono_string_to_utf8_image (image, pb->name, &error); g_assert (mono_error_ok (&error)); } if (pb->cattrs) { if (!method_aux->param_cattr) method_aux->param_cattr = image_g_new0 (image, MonoCustomAttrInfo*, m->signature->param_count + 1); method_aux->param_cattr [i] = mono_custom_attrs_from_builders (image, klass->image, pb->cattrs); } } } } /* Parameter marshalling */ specs = NULL; if (rmb->pinfo) for (i = 0; i < mono_array_length (rmb->pinfo); ++i) { MonoReflectionParamBuilder *pb; if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) { if (pb->marshal_info) { if (specs == NULL) specs = image_g_new0 (image, MonoMarshalSpec*, sig->param_count + 1); specs [pb->position] = mono_marshal_spec_from_builder (image, klass->image->assembly, pb->marshal_info); } } } if (specs != NULL) { if (!method_aux) method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1); method_aux->param_marshall = specs; } if (klass->image->dynamic && method_aux) g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux); mono_loader_unlock (); return m; } static MonoMethod* ctorbuilder_to_mono_method (MonoClass *klass, MonoReflectionCtorBuilder* mb) { ReflectionMethodBuilder rmb; MonoMethodSignature *sig; mono_loader_lock (); sig = ctor_builder_to_signature (klass->image, mb); mono_loader_unlock (); reflection_methodbuilder_from_ctor_builder (&rmb, mb); mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig); mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs); /* If we are in a generic class, we might be called multiple times from inflate_method */ if (!((MonoDynamicImage*)(MonoDynamicImage*)klass->image)->save && !klass->generic_container) { /* ilgen is no longer needed */ mb->ilgen = NULL; } return mb->mhandle; } static MonoMethod* methodbuilder_to_mono_method (MonoClass *klass, MonoReflectionMethodBuilder* mb) { ReflectionMethodBuilder rmb; MonoMethodSignature *sig; mono_loader_lock (); sig = method_builder_to_signature (klass->image, mb); mono_loader_unlock (); reflection_methodbuilder_from_method_builder (&rmb, mb); mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig); mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs); /* If we are in a generic class, we might be called multiple times from inflate_method */ if (!((MonoDynamicImage*)(MonoDynamicImage*)klass->image)->save && !klass->generic_container) { /* ilgen is no longer needed */ mb->ilgen = NULL; } return mb->mhandle; } static MonoClassField* fieldbuilder_to_mono_class_field (MonoClass *klass, MonoReflectionFieldBuilder* fb) { MonoClassField *field; MonoType *custom; field = g_new0 (MonoClassField, 1); field->name = mono_string_to_utf8 (fb->name); if (fb->attrs || fb->modreq || fb->modopt) { field->type = mono_metadata_type_dup (NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type)); field->type->attrs = fb->attrs; g_assert (klass->image->dynamic); custom = add_custom_modifiers ((MonoDynamicImage*)klass->image, field->type, fb->modreq, fb->modopt); g_free (field->type); field->type = custom; } else { field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); } if (fb->offset != -1) field->offset = fb->offset; field->parent = klass; mono_save_custom_attrs (klass->image, field, fb->cattrs); // FIXME: Can't store fb->def_value/RVA, is it needed for field_on_insts ? 
return field; } #endif MonoType* mono_reflection_bind_generic_parameters (MonoReflectionType *type, int type_argc, MonoType **types) { MonoClass *klass; MonoReflectionTypeBuilder *tb = NULL; gboolean is_dynamic = FALSE; MonoDomain *domain; MonoClass *geninst; mono_loader_lock (); domain = mono_object_domain (type); if (!strcmp (((MonoObject *) type)->vtable->klass->name, "TypeBuilder")) { tb = (MonoReflectionTypeBuilder *) type; is_dynamic = TRUE; } else if (!strcmp (((MonoObject *) type)->vtable->klass->name, "MonoGenericClass")) { MonoReflectionGenericClass *rgi = (MonoReflectionGenericClass *) type; tb = rgi->generic_type; is_dynamic = TRUE; } /* FIXME: fix the CreateGenericParameters protocol to avoid the two stage setup of TypeBuilders */ if (tb && tb->generic_container) mono_reflection_create_generic_class (tb); klass = mono_class_from_mono_type (mono_reflection_type_get_handle (type)); if (!klass->generic_container) { mono_loader_unlock (); return NULL; } if (klass->wastypebuilder) { tb = (MonoReflectionTypeBuilder *) klass->reflection_info; is_dynamic = TRUE; } mono_loader_unlock (); geninst = mono_class_bind_generic_parameters (klass, type_argc, types, is_dynamic); return &geninst->byval_arg; } MonoClass* mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic) { MonoGenericClass *gclass; MonoGenericInst *inst; g_assert (klass->generic_container); inst = mono_metadata_get_generic_inst (type_argc, types); gclass = mono_metadata_lookup_generic_class (klass, inst, is_dynamic); return mono_generic_class_get_class (gclass); } MonoReflectionMethod* mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types) { MonoClass *klass; MonoMethod *method, *inflated; MonoMethodInflated *imethod; MonoGenericContext tmp_context; MonoGenericInst *ginst; MonoType **type_argv; int count, i; MONO_ARCH_SAVE_REGS; if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) { #ifndef DISABLE_REFLECTION_EMIT MonoReflectionMethodBuilder *mb = NULL; MonoReflectionTypeBuilder *tb; MonoClass *klass; mb = (MonoReflectionMethodBuilder *) rmethod; tb = (MonoReflectionTypeBuilder *) mb->type; klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb)); method = methodbuilder_to_mono_method (klass, mb); #else g_assert_not_reached (); method = NULL; #endif } else { method = rmethod->method; } klass = method->klass; if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; count = mono_method_signature (method)->generic_param_count; if (count != mono_array_length (types)) return NULL; type_argv = g_new0 (MonoType *, count); for (i = 0; i < count; i++) { MonoReflectionType *garg = mono_array_get (types, gpointer, i); type_argv [i] = mono_reflection_type_get_handle (garg); } ginst = mono_metadata_get_generic_inst (count, type_argv); g_free (type_argv); tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL; tmp_context.method_inst = ginst; inflated = mono_class_inflate_generic_method (method, &tmp_context); imethod = (MonoMethodInflated *) inflated; if (method->klass->image->dynamic) { MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image; /* * This table maps metadata structures representing inflated methods/fields * to the reflection objects representing their generic definitions. 
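 * The inflated MonoMethodInflated is used as the key and the reflection object
 * describing the generic definition as the value; the insert below is done while
 * holding the loader lock.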
*/ mono_loader_lock (); mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod); mono_loader_unlock (); } if (!mono_verifier_is_method_valid_generic_instantiation (inflated)) mono_raise_exception (mono_get_exception_argument ("typeArguments", "Invalid generic arguments")); return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL); } #ifndef DISABLE_REFLECTION_EMIT static MonoMethod * inflate_mono_method (MonoClass *klass, MonoMethod *method, MonoObject *obj) { MonoMethodInflated *imethod; MonoGenericContext *context; int i; /* * With generic code sharing the klass might not be inflated. * This can happen because classes inflated with their own * type arguments are "normalized" to the uninflated class. */ if (!klass->generic_class) return method; context = mono_class_get_context (klass); if (klass->method.count) { /* Find the already created inflated method */ for (i = 0; i < klass->method.count; ++i) { g_assert (klass->methods [i]->is_inflated); if (((MonoMethodInflated*)klass->methods [i])->declaring == method) break; } g_assert (i < klass->method.count); imethod = (MonoMethodInflated*)klass->methods [i]; } else { imethod = (MonoMethodInflated *) mono_class_inflate_generic_method_full (method, klass, context); } if (method->is_generic && method->klass->image->dynamic) { MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image; mono_loader_lock (); mono_g_hash_table_insert (image->generic_def_objects, imethod, obj); mono_loader_unlock (); } return (MonoMethod *) imethod; } static MonoMethod * inflate_method (MonoReflectionGenericClass *type, MonoObject *obj) { MonoMethod *method; MonoClass *gklass; gklass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type->generic_type)); if (!strcmp (obj->vtable->klass->name, "MethodBuilder")) if (((MonoReflectionMethodBuilder*)obj)->mhandle) method = ((MonoReflectionMethodBuilder*)obj)->mhandle; else method = methodbuilder_to_mono_method (gklass, (MonoReflectionMethodBuilder *) obj); else if (!strcmp (obj->vtable->klass->name, "ConstructorBuilder")) method = ctorbuilder_to_mono_method (gklass, (MonoReflectionCtorBuilder *) obj); else if (!strcmp (obj->vtable->klass->name, "MonoMethod") || !strcmp (obj->vtable->klass->name, "MonoCMethod")) method = ((MonoReflectionMethod *) obj)->method; else { method = NULL; /* prevent compiler warning */ g_error ("can't handle type %s", obj->vtable->klass->name); } return inflate_mono_method (mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type)), method, obj); } /*TODO avoid saving custom attrs for generic classes as it's enough to have them on the generic type definition.*/ void mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods, MonoArray *ctors, MonoArray *fields, MonoArray *properties, MonoArray *events) { MonoGenericClass *gclass; MonoDynamicGenericClass *dgclass; MonoClass *klass, *gklass; MonoType *gtype; int i; MONO_ARCH_SAVE_REGS; gtype = mono_reflection_type_get_handle ((MonoReflectionType*)type); klass = mono_class_from_mono_type (gtype); g_assert (gtype->type == MONO_TYPE_GENERICINST); gclass = gtype->data.generic_class; g_assert (gclass->is_dynamic); dgclass = (MonoDynamicGenericClass *) gclass; if (dgclass->initialized) return; gklass = gclass->container_class; mono_class_init (gklass); dgclass->count_methods = methods ? mono_array_length (methods) : 0; dgclass->count_ctors = ctors ? mono_array_length (ctors) : 0; dgclass->count_fields = fields ? 
mono_array_length (fields) : 0; dgclass->count_properties = properties ? mono_array_length (properties) : 0; dgclass->count_events = events ? mono_array_length (events) : 0; dgclass->methods = g_new0 (MonoMethod *, dgclass->count_methods); dgclass->ctors = g_new0 (MonoMethod *, dgclass->count_ctors); dgclass->fields = g_new0 (MonoClassField, dgclass->count_fields); dgclass->properties = g_new0 (MonoProperty, dgclass->count_properties); dgclass->events = g_new0 (MonoEvent, dgclass->count_events); dgclass->field_objects = g_new0 (MonoObject*, dgclass->count_fields); dgclass->field_generic_types = g_new0 (MonoType*, dgclass->count_fields); for (i = 0; i < dgclass->count_methods; i++) { MonoObject *obj = mono_array_get (methods, gpointer, i); dgclass->methods [i] = inflate_method (type, obj); } for (i = 0; i < dgclass->count_ctors; i++) { MonoObject *obj = mono_array_get (ctors, gpointer, i); dgclass->ctors [i] = inflate_method (type, obj); } for (i = 0; i < dgclass->count_fields; i++) { MonoObject *obj = mono_array_get (fields, gpointer, i); MonoClassField *field, *inflated_field = NULL; if (!strcmp (obj->vtable->klass->name, "FieldBuilder")) inflated_field = field = fieldbuilder_to_mono_class_field (klass, (MonoReflectionFieldBuilder *) obj); else if (!strcmp (obj->vtable->klass->name, "MonoField")) field = ((MonoReflectionField *) obj)->field; else { field = NULL; /* prevent compiler warning */ g_assert_not_reached (); } dgclass->fields [i] = *field; dgclass->fields [i].parent = klass; dgclass->fields [i].type = mono_class_inflate_generic_type ( field->type, mono_generic_class_get_context ((MonoGenericClass *) dgclass)); dgclass->field_generic_types [i] = field->type; MOVING_GC_REGISTER (&dgclass->field_objects [i]); dgclass->field_objects [i] = obj; if (inflated_field) { g_free (inflated_field); } else { dgclass->fields [i].name = g_strdup (dgclass->fields [i].name); } } for (i = 0; i < dgclass->count_properties; i++) { MonoObject *obj = mono_array_get (properties, gpointer, i); MonoProperty *property = &dgclass->properties [i]; if (!strcmp (obj->vtable->klass->name, "PropertyBuilder")) { MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *) obj; property->parent = klass; property->attrs = pb->attrs; property->name = mono_string_to_utf8 (pb->name); if (pb->get_method) property->get = inflate_method (type, (MonoObject *) pb->get_method); if (pb->set_method) property->set = inflate_method (type, (MonoObject *) pb->set_method); } else if (!strcmp (obj->vtable->klass->name, "MonoProperty")) { *property = *((MonoReflectionProperty *) obj)->property; property->name = g_strdup (property->name); if (property->get) property->get = inflate_mono_method (klass, property->get, NULL); if (property->set) property->set = inflate_mono_method (klass, property->set, NULL); } else g_assert_not_reached (); } for (i = 0; i < dgclass->count_events; i++) { MonoObject *obj = mono_array_get (events, gpointer, i); MonoEvent *event = &dgclass->events [i]; if (!strcmp (obj->vtable->klass->name, "EventBuilder")) { MonoReflectionEventBuilder *eb = (MonoReflectionEventBuilder *) obj; event->parent = klass; event->attrs = eb->attrs; event->name = mono_string_to_utf8 (eb->name); if (eb->add_method) event->add = inflate_method (type, (MonoObject *) eb->add_method); if (eb->remove_method) event->remove = inflate_method (type, (MonoObject *) eb->remove_method); } else if (!strcmp (obj->vtable->klass->name, "MonoEvent")) { *event = *((MonoReflectionMonoEvent *) obj)->event; event->name = g_strdup (event->name); 
if (event->add) event->add = inflate_mono_method (klass, event->add, NULL); if (event->remove) event->remove = inflate_mono_method (klass, event->remove, NULL); } else g_assert_not_reached (); } dgclass->initialized = TRUE; } static void ensure_generic_class_runtime_vtable (MonoClass *klass) { MonoClass *gklass = klass->generic_class->container_class; int i; if (klass->wastypebuilder) return; ensure_runtime_vtable (gklass); klass->method.count = gklass->method.count; klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * (klass->method.count + 1)); for (i = 0; i < klass->method.count; i++) { klass->methods [i] = mono_class_inflate_generic_method_full ( gklass->methods [i], klass, mono_class_get_context (klass)); } klass->interface_count = gklass->interface_count; klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count); for (i = 0; i < klass->interface_count; ++i) { MonoType *iface_type = mono_class_inflate_generic_type (&gklass->interfaces [i]->byval_arg, mono_class_get_context (klass)); klass->interfaces [i] = mono_class_from_mono_type (iface_type); mono_metadata_free_type (iface_type); ensure_runtime_vtable (klass->interfaces [i]); } klass->interfaces_inited = 1; /*We can only finish with this klass once it's parent has as well*/ if (gklass->wastypebuilder) klass->wastypebuilder = TRUE; return; } static void ensure_runtime_vtable (MonoClass *klass) { MonoReflectionTypeBuilder *tb = klass->reflection_info; int i, num, j; if (!klass->image->dynamic || (!tb && !klass->generic_class) || klass->wastypebuilder) return; if (klass->parent) ensure_runtime_vtable (klass->parent); if (tb) { num = tb->ctors? mono_array_length (tb->ctors): 0; num += tb->num_methods; klass->method.count = num; klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * num); num = tb->ctors? mono_array_length (tb->ctors): 0; for (i = 0; i < num; ++i) klass->methods [i] = ctorbuilder_to_mono_method (klass, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i)); num = tb->num_methods; j = i; for (i = 0; i < num; ++i) klass->methods [j++] = methodbuilder_to_mono_method (klass, mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i)); if (tb->interfaces) { klass->interface_count = mono_array_length (tb->interfaces); klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count); for (i = 0; i < klass->interface_count; ++i) { MonoType *iface = mono_type_array_get_and_resolve (tb->interfaces, i); klass->interfaces [i] = mono_class_from_mono_type (iface); ensure_runtime_vtable (klass->interfaces [i]); } klass->interfaces_inited = 1; } } else if (klass->generic_class){ ensure_generic_class_runtime_vtable (klass); } if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) { for (i = 0; i < klass->method.count; ++i) klass->methods [i]->slot = i; mono_class_setup_interface_offsets (klass); mono_class_setup_interface_id (klass); } /* * The generic vtable is needed even if image->run is not set since some * runtime code like ves_icall_Type_GetMethodsByName depends on * method->slot being defined. */ /* * tb->methods could not be freed since it is used for determining * overrides during dynamic vtable construction. 
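 * (mono_reflection_get_dynamic_overrides () below walks tb->methods looking for
 * entries with a non-NULL override_method)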
*/ } static MonoMethod* mono_reflection_method_get_handle (MonoObject *method) { MonoClass *class = mono_object_class (method); if (is_sr_mono_method (class) || is_sr_mono_generic_method (class)) { MonoReflectionMethod *sr_method = (MonoReflectionMethod*)method; return sr_method->method; } if (is_sre_method_builder (class)) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)method; return mb->mhandle; } if (is_sre_method_on_tb_inst (class)) { MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)method; MonoMethod *result; /*FIXME move this to a proper method and unify with resolve_object*/ if (m->method_args) { result = mono_reflection_method_on_tb_inst_get_handle (m); } else { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst); MonoClass *inflated_klass = mono_class_from_mono_type (type); MonoMethod *mono_method; if (is_sre_method_builder (mono_object_class (m->mb))) mono_method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle; else if (is_sr_mono_method (mono_object_class (m->mb))) mono_method = ((MonoReflectionMethod *)m->mb)->method; else g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb))); result = inflate_mono_method (inflated_klass, mono_method, (MonoObject*)m->mb); } return result; } g_error ("Can't handle methods of type %s:%s", class->name_space, class->name); return NULL; } void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides) { MonoReflectionTypeBuilder *tb; int i, onum; *overrides = NULL; *num_overrides = 0; g_assert (klass->image->dynamic); if (!klass->reflection_info) return; g_assert (strcmp (((MonoObject*)klass->reflection_info)->vtable->klass->name, "TypeBuilder") == 0); tb = (MonoReflectionTypeBuilder*)klass->reflection_info; onum = 0; if (tb->methods) { for (i = 0; i < tb->num_methods; ++i) { MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i); if (mb->override_method) onum ++; } } if (onum) { *overrides = g_new0 (MonoMethod*, onum * 2); onum = 0; for (i = 0; i < tb->num_methods; ++i) { MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i); if (mb->override_method) { (*overrides) [onum * 2] = mono_reflection_method_get_handle ((MonoObject *)mb->override_method); (*overrides) [onum * 2 + 1] = mb->mhandle; g_assert (mb->mhandle); onum ++; } } } *num_overrides = onum; } static void typebuilder_setup_fields (MonoClass *klass, MonoError *error) { MonoReflectionTypeBuilder *tb = klass->reflection_info; MonoReflectionFieldBuilder *fb; MonoClassField *field; MonoImage *image = klass->image; const char *p, *p2; int i; guint32 len, idx, real_size = 0; klass->field.count = tb->num_fields; klass->field.first = 0; mono_error_init (error); if (tb->class_size) { g_assert ((tb->packing_size & 0xfffffff0) == 0); klass->packing_size = tb->packing_size; real_size = klass->instance_size + tb->class_size; } if (!klass->field.count) { klass->instance_size = MAX (klass->instance_size, real_size); return; } klass->fields = image_g_new0 (image, MonoClassField, klass->field.count); mono_class_alloc_ext (klass); klass->ext->field_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->field.count); /* This is, guess what, a hack. The issue is that the runtime doesn't know how to setup the fields of a typebuider and crash. On the static path no field class is resolved, only types are built. 
This is the right thing to do but we suck. Setting size_inited is harmless because we're doing the same job as mono_class_setup_fields anyway. */ klass->size_inited = 1; for (i = 0; i < klass->field.count; ++i) { fb = mono_array_get (tb->fields, gpointer, i); field = &klass->fields [i]; field->name = mono_string_to_utf8_image (image, fb->name, error); if (!mono_error_ok (error)) return; if (fb->attrs) { field->type = mono_metadata_type_dup (klass->image, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type)); field->type->attrs = fb->attrs; } else { field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); } if ((fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) && fb->rva_data) klass->ext->field_def_values [i].data = mono_array_addr (fb->rva_data, char, 0); if (fb->offset != -1) field->offset = fb->offset; field->parent = klass; fb->handle = field; mono_save_custom_attrs (klass->image, field, fb->cattrs); if (fb->def_value) { MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image; field->type->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT; idx = encode_constant (assembly, fb->def_value, &klass->ext->field_def_values [i].def_type); /* Copy the data from the blob since it might get realloc-ed */ p = assembly->blob.data + idx; len = mono_metadata_decode_blob_size (p, &p2); len += p2 - p; klass->ext->field_def_values [i].data = mono_image_alloc (image, len); memcpy ((gpointer)klass->ext->field_def_values [i].data, p, len); } } klass->instance_size = MAX (klass->instance_size, real_size); mono_class_layout_fields (klass); } static void typebuilder_setup_properties (MonoClass *klass, MonoError *error) { MonoReflectionTypeBuilder *tb = klass->reflection_info; MonoReflectionPropertyBuilder *pb; MonoImage *image = klass->image; MonoProperty *properties; int i; mono_error_init (error); if (!klass->ext) klass->ext = image_g_new0 (image, MonoClassExt, 1); klass->ext->property.count = tb->properties ? 
mono_array_length (tb->properties) : 0; klass->ext->property.first = 0; properties = image_g_new0 (image, MonoProperty, klass->ext->property.count); klass->ext->properties = properties; for (i = 0; i < klass->ext->property.count; ++i) { pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i); properties [i].parent = klass; properties [i].attrs = pb->attrs; properties [i].name = mono_string_to_utf8_image (image, pb->name, error); if (!mono_error_ok (error)) return; if (pb->get_method) properties [i].get = pb->get_method->mhandle; if (pb->set_method) properties [i].set = pb->set_method->mhandle; mono_save_custom_attrs (klass->image, &properties [i], pb->cattrs); } } MonoReflectionEvent * mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb) { MonoEvent *event = g_new0 (MonoEvent, 1); MonoClass *klass; int j; klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb)); event->parent = klass; event->attrs = eb->attrs; event->name = mono_string_to_utf8 (eb->name); if (eb->add_method) event->add = eb->add_method->mhandle; if (eb->remove_method) event->remove = eb->remove_method->mhandle; if (eb->raise_method) event->raise = eb->raise_method->mhandle; if (eb->other_methods) { event->other = g_new0 (MonoMethod*, mono_array_length (eb->other_methods) + 1); for (j = 0; j < mono_array_length (eb->other_methods); ++j) { MonoReflectionMethodBuilder *mb = mono_array_get (eb->other_methods, MonoReflectionMethodBuilder*, j); event->other [j] = mb->mhandle; } } return mono_event_get_object (mono_object_domain (tb), klass, event); } static void typebuilder_setup_events (MonoClass *klass, MonoError *error) { MonoReflectionTypeBuilder *tb = klass->reflection_info; MonoReflectionEventBuilder *eb; MonoImage *image = klass->image; MonoEvent *events; int i, j; mono_error_init (error); if (!klass->ext) klass->ext = image_g_new0 (image, MonoClassExt, 1); klass->ext->event.count = tb->events ? 
mono_array_length (tb->events) : 0; klass->ext->event.first = 0; events = image_g_new0 (image, MonoEvent, klass->ext->event.count); klass->ext->events = events; for (i = 0; i < klass->ext->event.count; ++i) { eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i); events [i].parent = klass; events [i].attrs = eb->attrs; events [i].name = mono_string_to_utf8_image (image, eb->name, error); if (!mono_error_ok (error)) return; if (eb->add_method) events [i].add = eb->add_method->mhandle; if (eb->remove_method) events [i].remove = eb->remove_method->mhandle; if (eb->raise_method) events [i].raise = eb->raise_method->mhandle; if (eb->other_methods) { events [i].other = image_g_new0 (image, MonoMethod*, mono_array_length (eb->other_methods) + 1); for (j = 0; j < mono_array_length (eb->other_methods); ++j) { MonoReflectionMethodBuilder *mb = mono_array_get (eb->other_methods, MonoReflectionMethodBuilder*, j); events [i].other [j] = mb->mhandle; } } mono_save_custom_attrs (klass->image, &events [i], eb->cattrs); } } static gboolean remove_instantiations_of (gpointer key, gpointer value, gpointer user_data) { MonoType *type = (MonoType*)key; MonoClass *klass = (MonoClass*)user_data; if ((type->type == MONO_TYPE_GENERICINST) && (type->data.generic_class->container_class == klass)) return TRUE; else return FALSE; } static void check_array_for_usertypes (MonoArray *arr) { int i; if (!arr) return; for (i = 0; i < mono_array_length (arr); ++i) RESOLVE_ARRAY_TYPE_ELEMENT (arr, i); } MonoReflectionType* mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb) { MonoError error; MonoClass *klass; MonoDomain* domain; MonoReflectionType* res; int i, j; MONO_ARCH_SAVE_REGS; domain = mono_object_domain (tb); klass = mono_class_from_mono_type (tb->type.type); /* * Check for user defined Type subclasses. 
*/ RESOLVE_TYPE (tb->parent); check_array_for_usertypes (tb->interfaces); if (tb->fields) { for (i = 0; i < mono_array_length (tb->fields); ++i) { MonoReflectionFieldBuilder *fb = mono_array_get (tb->fields, gpointer, i); if (fb) { RESOLVE_TYPE (fb->type); check_array_for_usertypes (fb->modreq); check_array_for_usertypes (fb->modopt); if (fb->marshal_info && fb->marshal_info->marshaltyperef) RESOLVE_TYPE (fb->marshal_info->marshaltyperef); } } } if (tb->methods) { for (i = 0; i < mono_array_length (tb->methods); ++i) { MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, gpointer, i); if (mb) { RESOLVE_TYPE (mb->rtype); check_array_for_usertypes (mb->return_modreq); check_array_for_usertypes (mb->return_modopt); check_array_for_usertypes (mb->parameters); if (mb->param_modreq) for (j = 0; j < mono_array_length (mb->param_modreq); ++j) check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j)); if (mb->param_modopt) for (j = 0; j < mono_array_length (mb->param_modopt); ++j) check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j)); } } } if (tb->ctors) { for (i = 0; i < mono_array_length (tb->ctors); ++i) { MonoReflectionCtorBuilder *mb = mono_array_get (tb->ctors, gpointer, i); if (mb) { check_array_for_usertypes (mb->parameters); if (mb->param_modreq) for (j = 0; j < mono_array_length (mb->param_modreq); ++j) check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j)); if (mb->param_modopt) for (j = 0; j < mono_array_length (mb->param_modopt); ++j) check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j)); } } } mono_save_custom_attrs (klass->image, klass, tb->cattrs); /* * we need to lock the domain because the lock will be taken inside * So, we need to keep the locking order correct. */ mono_loader_lock (); mono_domain_lock (domain); if (klass->wastypebuilder) { mono_domain_unlock (domain); mono_loader_unlock (); return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg); } /* * Fields to set in klass: * the various flags: delegate/unicode/contextbound etc. */ klass->flags = tb->attrs; klass->has_cctor = 1; klass->has_finalize = 1; #if 0 if (!((MonoDynamicImage*)klass->image)->run) { if (klass->generic_container) { /* FIXME: The code below can't handle generic classes */ klass->wastypebuilder = TRUE; mono_loader_unlock (); mono_domain_unlock (domain); return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg); } } #endif /* enums are done right away */ if (!klass->enumtype) ensure_runtime_vtable (klass); if (tb->subtypes) { for (i = 0; i < mono_array_length (tb->subtypes); ++i) { MonoReflectionTypeBuilder *subtb = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i); mono_class_alloc_ext (klass); klass->ext->nested_classes = g_list_prepend_image (klass->image, klass->ext->nested_classes, mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)subtb))); } } klass->nested_classes_inited = TRUE; /* fields and object layout */ if (klass->parent) { if (!klass->parent->size_inited) mono_class_init (klass->parent); klass->instance_size = klass->parent->instance_size; klass->sizes.class_size = 0; klass->min_align = klass->parent->min_align; /* if the type has no fields we won't call the field_setup * routine which sets up klass->has_references. 
*/ klass->has_references |= klass->parent->has_references; } else { klass->instance_size = sizeof (MonoObject); klass->min_align = 1; } /* FIXME: handle packing_size and instance_size */ typebuilder_setup_fields (klass, &error); if (!mono_error_ok (&error)) goto failure; typebuilder_setup_properties (klass, &error); if (!mono_error_ok (&error)) goto failure; typebuilder_setup_events (klass, &error); if (!mono_error_ok (&error)) goto failure; klass->wastypebuilder = TRUE; /* * If we are a generic TypeBuilder, there might be instantiations in the type cache * which have type System.Reflection.MonoGenericClass, but after the type is created, * we want to return normal System.MonoType objects, so clear these out from the cache. */ if (domain->type_hash && klass->generic_container) mono_g_hash_table_foreach_remove (domain->type_hash, remove_instantiations_of, klass); mono_domain_unlock (domain); mono_loader_unlock (); if (klass->enumtype && !mono_class_is_valid_enum (klass)) { mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL); mono_raise_exception (mono_get_exception_type_load (tb->name, NULL)); } res = mono_type_get_object (mono_object_domain (tb), &klass->byval_arg); g_assert (res != (MonoReflectionType*)tb); return res; failure: mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL); klass->wastypebuilder = TRUE; mono_domain_unlock (domain); mono_loader_unlock (); mono_error_raise_exception (&error); return NULL; } void mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam) { MonoGenericParamFull *param; MonoImage *image; MonoClass *pklass; MONO_ARCH_SAVE_REGS; param = g_new0 (MonoGenericParamFull, 1); if (gparam->mbuilder) { if (!gparam->mbuilder->generic_container) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)gparam->mbuilder->type; MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb)); gparam->mbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer)); gparam->mbuilder->generic_container->is_method = TRUE; /* * Cannot set owner.method, since the MonoMethod is not created yet. * Set the image field instead, so type_in_image () works. */ gparam->mbuilder->generic_container->image = klass->image; } param->param.owner = gparam->mbuilder->generic_container; } else if (gparam->tbuilder) { if (!gparam->tbuilder->generic_container) { MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)gparam->tbuilder)); gparam->tbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer)); gparam->tbuilder->generic_container->owner.klass = klass; } param->param.owner = gparam->tbuilder->generic_container; } param->info.name = mono_string_to_utf8 (gparam->name); param->param.num = gparam->index; image = &gparam->tbuilder->module->dynamic_image->image; pklass = mono_class_from_generic_parameter ((MonoGenericParam *) param, image, gparam->mbuilder != NULL); gparam->type.type = &pklass->byval_arg; MOVING_GC_REGISTER (&pklass->reflection_info); pklass->reflection_info = gparam; /* FIXME: GC pin gparam */ mono_image_lock (image); image->reflection_info_unregister_classes = g_slist_prepend (image->reflection_info_unregister_classes, pklass); mono_image_unlock (image); } MonoArray * mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig) { MonoReflectionModuleBuilder *module = sig->module; MonoDynamicImage *assembly = module != NULL ? 
module->dynamic_image : NULL; guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0; guint32 buflen, i; MonoArray *result; SigBuffer buf; check_array_for_usertypes (sig->arguments); sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x07); sigbuffer_add_value (&buf, na); if (assembly != NULL){ for (i = 0; i < na; ++i) { MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, type, &buf); } } buflen = buf.p - buf.buf; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen); memcpy (mono_array_addr (result, char, 0), buf.buf, buflen); sigbuffer_free (&buf); return result; } MonoArray * mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig) { MonoDynamicImage *assembly = sig->module->dynamic_image; guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0; guint32 buflen, i; MonoArray *result; SigBuffer buf; check_array_for_usertypes (sig->arguments); sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); for (i = 0; i < na; ++i) { MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, type, &buf); } buflen = buf.p - buf.buf; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen); memcpy (mono_array_addr (result, char, 0), buf.buf, buflen); sigbuffer_free (&buf); return result; } typedef struct { MonoMethod *handle; MonoDomain *domain; } DynamicMethodReleaseData; static MonoReferenceQueue *dynamic_method_queue; void mono_reflection_shutdown (void) { MonoReferenceQueue *queue; mono_loader_lock (); queue = dynamic_method_queue; dynamic_method_queue = NULL; if (queue) mono_gc_reference_queue_free (queue); mono_loader_unlock (); } static void free_dynamic_method (void *dynamic_method) { DynamicMethodReleaseData *data = dynamic_method; mono_runtime_free_method (data->domain, data->handle); g_free (data); } void mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb) { MonoReferenceQueue *queue; MonoMethod *handle; DynamicMethodReleaseData *release_data; ReflectionMethodBuilder rmb; MonoMethodSignature *sig; MonoClass *klass; GSList *l; int i; if (mono_runtime_is_shutting_down ()) mono_raise_exception (mono_get_exception_invalid_operation ("")); if (!(queue = dynamic_method_queue)) { mono_loader_lock (); if (!(queue = dynamic_method_queue)) queue = dynamic_method_queue = mono_gc_reference_queue_new (free_dynamic_method); mono_loader_unlock (); } sig = dynamic_method_to_signature (mb); reflection_methodbuilder_from_dynamic_method (&rmb, mb); /* * Resolve references. */ /* * Every second entry in the refs array is reserved for storing handle_class, * which is needed by the ldtoken implementation in the JIT. */ rmb.nrefs = mb->nrefs; rmb.refs = g_new0 (gpointer, mb->nrefs + 1); for (i = 0; i < mb->nrefs; i += 2) { MonoClass *handle_class; gpointer ref; MonoObject *obj = mono_array_get (mb->refs, MonoObject*, i); if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) { MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj; /* * The referenced DynamicMethod should already be created by the managed * code, except in the case of circular references. In that case, we store * method in the refs array, and fix it up later when the referenced * DynamicMethod is created. 
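 *
 * Illustrative layout note (not from the original sources): rmb.refs is
 * filled below as [handle, handle_class] pairs, so a consumer walking it
 * conceptually does
 *
 *     for (i = 0; i < nrefs; i += 2) {
 *         gpointer   handle       = refs [i];
 *         MonoClass *handle_class = refs [i + 1];
 *         ...
 *     }
 *
 * For a circular DynamicMethod reference the managed object itself is
 * stored in refs [i] and is patched to the real MonoMethod* by the fix-up
 * loop further down, once the referenced method has actually been created.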
*/ if (method->mhandle) { ref = method->mhandle; } else { /* FIXME: GC object stored in unmanaged memory */ ref = method; /* FIXME: GC object stored in unmanaged memory */ method->referenced_by = g_slist_append (method->referenced_by, mb); } handle_class = mono_defaults.methodhandle_class; } else { MonoException *ex = NULL; ref = resolve_object (mb->module->image, obj, &handle_class, NULL); if (!ref) ex = mono_get_exception_type_load (NULL, NULL); else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) ex = mono_security_core_clr_ensure_dynamic_method_resolved_object (ref, handle_class); if (ex) { g_free (rmb.refs); mono_raise_exception (ex); return; } } rmb.refs [i] = ref; /* FIXME: GC object stored in unmanaged memory (change also resolve_object() signature) */ rmb.refs [i + 1] = handle_class; } klass = mb->owner ? mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mb->owner)) : mono_defaults.object_class; mb->mhandle = handle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig); release_data = g_new (DynamicMethodReleaseData, 1); release_data->handle = handle; release_data->domain = mono_object_get_domain ((MonoObject*)mb); if (!mono_gc_reference_queue_add (queue, (MonoObject*)mb, release_data)) g_free (release_data); /* Fix up refs entries pointing at us */ for (l = mb->referenced_by; l; l = l->next) { MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)l->data; MonoMethodWrapper *wrapper = (MonoMethodWrapper*)method->mhandle; gpointer *data; g_assert (method->mhandle); data = (gpointer*)wrapper->method_data; for (i = 0; i < GPOINTER_TO_UINT (data [0]); i += 2) { if ((data [i + 1] == mb) && (data [i + 1 + 1] == mono_defaults.methodhandle_class)) data [i + 1] = mb->mhandle; } } g_slist_free (mb->referenced_by); g_free (rmb.refs); /* ilgen is no longer needed */ mb->ilgen = NULL; } #endif /* DISABLE_REFLECTION_EMIT */ /** * * mono_reflection_is_valid_dynamic_token: * * Returns TRUE if token is valid. * */ gboolean mono_reflection_is_valid_dynamic_token (MonoDynamicImage *image, guint32 token) { return mono_g_hash_table_lookup (image->tokens, GUINT_TO_POINTER (token)) != NULL; } #ifndef DISABLE_REFLECTION_EMIT /** * mono_reflection_lookup_dynamic_token: * * Finish the Builder object pointed to by TOKEN and return the corresponding * runtime structure. If HANDLE_CLASS is not NULL, it is set to the class required by * mono_ldtoken. If valid_token is TRUE, assert if it is not found in the token->object * mapping table. * * LOCKING: Take the loader lock */ gpointer mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context) { MonoDynamicImage *assembly = (MonoDynamicImage*)image; MonoObject *obj; MonoClass *klass; mono_loader_lock (); obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token)); mono_loader_unlock (); if (!obj) { if (valid_token) g_error ("Could not find required dynamic token 0x%08x", token); else return NULL; } if (!handle_class) handle_class = &klass; return resolve_object (image, obj, handle_class, context); } /* * ensure_complete_type: * * Ensure that KLASS is completed if it is a dynamic type, or references * dynamic types. 
*/ static void ensure_complete_type (MonoClass *klass) { if (klass->image->dynamic && !klass->wastypebuilder) { MonoReflectionTypeBuilder *tb = klass->reflection_info; mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb); // Asserting here could break a lot of code //g_assert (klass->wastypebuilder); } if (klass->generic_class) { MonoGenericInst *inst = klass->generic_class->context.class_inst; int i; for (i = 0; i < inst->type_argc; ++i) { ensure_complete_type (mono_class_from_mono_type (inst->type_argv [i])); } } } static gpointer resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context) { gpointer result = NULL; if (strcmp (obj->vtable->klass->name, "String") == 0) { result = mono_string_intern ((MonoString*)obj); *handle_class = mono_defaults.string_class; g_assert (result); } else if (strcmp (obj->vtable->klass->name, "MonoType") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj); if (context) { MonoType *inflated = mono_class_inflate_generic_type (type, context); result = mono_class_from_mono_type (inflated); mono_metadata_free_type (inflated); } else { result = mono_class_from_mono_type (type); } *handle_class = mono_defaults.typehandle_class; g_assert (result); } else if (strcmp (obj->vtable->klass->name, "MonoMethod") == 0 || strcmp (obj->vtable->klass->name, "MonoCMethod") == 0 || strcmp (obj->vtable->klass->name, "MonoGenericCMethod") == 0 || strcmp (obj->vtable->klass->name, "MonoGenericMethod") == 0) { result = ((MonoReflectionMethod*)obj)->method; if (context) result = mono_class_inflate_generic_method (result, context); *handle_class = mono_defaults.methodhandle_class; g_assert (result); } else if (strcmp (obj->vtable->klass->name, "MethodBuilder") == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj; result = mb->mhandle; if (!result) { /* Type is not yet created */ MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type; mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb); /* * Hopefully this has been filled in by calling CreateType() on the * TypeBuilder. */ /* * TODO: This won't work if the application finishes another * TypeBuilder instance instead of this one. 
*/ result = mb->mhandle; } if (context) result = mono_class_inflate_generic_method (result, context); *handle_class = mono_defaults.methodhandle_class; } else if (strcmp (obj->vtable->klass->name, "ConstructorBuilder") == 0) { MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj; result = cb->mhandle; if (!result) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)cb->type; mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb); result = cb->mhandle; } if (context) result = mono_class_inflate_generic_method (result, context); *handle_class = mono_defaults.methodhandle_class; } else if (strcmp (obj->vtable->klass->name, "MonoField") == 0) { MonoClassField *field = ((MonoReflectionField*)obj)->field; ensure_complete_type (field->parent); if (context) { MonoType *inflated = mono_class_inflate_generic_type (&field->parent->byval_arg, context); MonoClass *class = mono_class_from_mono_type (inflated); MonoClassField *inflated_field; gpointer iter = NULL; mono_metadata_free_type (inflated); while ((inflated_field = mono_class_get_fields (class, &iter))) { if (!strcmp (field->name, inflated_field->name)) break; } g_assert (inflated_field && !strcmp (field->name, inflated_field->name)); result = inflated_field; } else { result = field; } *handle_class = mono_defaults.fieldhandle_class; g_assert (result); } else if (strcmp (obj->vtable->klass->name, "FieldBuilder") == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj; result = fb->handle; if (!result) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)fb->typeb; mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb); result = fb->handle; } if (fb->handle && fb->handle->parent->generic_container) { MonoClass *klass = fb->handle->parent; MonoType *type = mono_class_inflate_generic_type (&klass->byval_arg, context); MonoClass *inflated = mono_class_from_mono_type (type); result = mono_class_get_field_from_name (inflated, mono_field_get_name (fb->handle)); g_assert (result); mono_metadata_free_type (type); } *handle_class = mono_defaults.fieldhandle_class; } else if (strcmp (obj->vtable->klass->name, "TypeBuilder") == 0) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj; MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)tb); MonoClass *klass; klass = type->data.klass; if (klass->wastypebuilder) { /* Already created */ result = klass; } else { mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb); result = type->data.klass; g_assert (result); } *handle_class = mono_defaults.typehandle_class; } else if (strcmp (obj->vtable->klass->name, "SignatureHelper") == 0) { MonoReflectionSigHelper *helper = (MonoReflectionSigHelper*)obj; MonoMethodSignature *sig; int nargs, i; if (helper->arguments) nargs = mono_array_length (helper->arguments); else nargs = 0; sig = mono_metadata_signature_alloc (image, nargs); sig->explicit_this = helper->call_conv & 64 ? 1 : 0; sig->hasthis = helper->call_conv & 32 ? 1 : 0; if (helper->unmanaged_call_conv) { /* unmanaged */ sig->call_convention = helper->unmanaged_call_conv - 1; sig->pinvoke = TRUE; } else if (helper->call_conv & 0x02) { sig->call_convention = MONO_CALL_VARARG; } else { sig->call_convention = MONO_CALL_DEFAULT; } sig->param_count = nargs; /* TODO: Copy type ? 
*/ sig->ret = helper->return_type->type; for (i = 0; i < nargs; ++i) sig->params [i] = mono_type_array_get_and_resolve (helper->arguments, i); result = sig; *handle_class = NULL; } else if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) { MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj; /* Already created by the managed code */ g_assert (method->mhandle); result = method->mhandle; *handle_class = mono_defaults.methodhandle_class; } else if (strcmp (obj->vtable->klass->name, "GenericTypeParameterBuilder") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj); type = mono_class_inflate_generic_type (type, context); result = mono_class_from_mono_type (type); *handle_class = mono_defaults.typehandle_class; g_assert (result); mono_metadata_free_type (type); } else if (strcmp (obj->vtable->klass->name, "MonoGenericClass") == 0) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj); type = mono_class_inflate_generic_type (type, context); result = mono_class_from_mono_type (type); *handle_class = mono_defaults.typehandle_class; g_assert (result); mono_metadata_free_type (type); } else if (strcmp (obj->vtable->klass->name, "FieldOnTypeBuilderInst") == 0) { MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj; MonoClass *inflated; MonoType *type; type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)f->inst), context); inflated = mono_class_from_mono_type (type); g_assert (f->fb->handle); result = mono_class_get_field_from_name (inflated, mono_field_get_name (f->fb->handle)); g_assert (result); mono_metadata_free_type (type); *handle_class = mono_defaults.fieldhandle_class; } else if (strcmp (obj->vtable->klass->name, "ConstructorOnTypeBuilderInst") == 0) { MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj; MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)c->inst), context); MonoClass *inflated_klass = mono_class_from_mono_type (type); g_assert (c->cb->mhandle); result = inflate_mono_method (inflated_klass, c->cb->mhandle, (MonoObject*)c->cb); *handle_class = mono_defaults.methodhandle_class; mono_metadata_free_type (type); } else if (strcmp (obj->vtable->klass->name, "MethodOnTypeBuilderInst") == 0) { MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj; if (m->method_args) { result = mono_reflection_method_on_tb_inst_get_handle (m); } else { MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)m->inst), context); MonoClass *inflated_klass = mono_class_from_mono_type (type); g_assert (m->mb->mhandle); result = inflate_mono_method (inflated_klass, m->mb->mhandle, (MonoObject*)m->mb); mono_metadata_free_type (type); } *handle_class = mono_defaults.methodhandle_class; } else if (strcmp (obj->vtable->klass->name, "MonoArrayMethod") == 0) { MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod*)obj; MonoType *mtype; MonoClass *klass; MonoMethod *method; gpointer iter; char *name; mtype = mono_reflection_type_get_handle (m->parent); klass = mono_class_from_mono_type (mtype); /* Find the method */ name = mono_string_to_utf8 (m->name); iter = NULL; while ((method = mono_class_get_methods (klass, &iter))) { if (!strcmp (method->name, name)) break; } g_free (name); // FIXME: g_assert (method); // FIXME: Check parameters/return value etc. 
match result = method; *handle_class = mono_defaults.methodhandle_class; } else if (is_sre_array (mono_object_get_class(obj)) || is_sre_byref (mono_object_get_class(obj)) || is_sre_pointer (mono_object_get_class(obj))) { MonoReflectionType *ref_type = (MonoReflectionType *)obj; MonoType *type = mono_reflection_type_get_handle (ref_type); result = mono_class_from_mono_type (type); *handle_class = mono_defaults.typehandle_class; } else { g_print ("%s\n", obj->vtable->klass->name); g_assert_not_reached (); } return result; } #else /* DISABLE_REFLECTION_EMIT */ MonoArray* mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues) { g_assert_not_reached (); return NULL; } void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb) { g_assert_not_reached (); } void mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb) { g_assert_not_reached (); } void mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb) { g_assert_not_reached (); } void mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb) { g_assert_not_reached (); } void mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb) { g_error ("This mono runtime was configured with --enable-minimal=reflection_emit, so System.Reflection.Emit is not supported."); } void mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb) { g_assert_not_reached (); } void mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type) { g_assert_not_reached (); } MonoReflectionModule * mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName) { g_assert_not_reached (); return NULL; } guint32 mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str) { g_assert_not_reached (); return 0; } guint32 mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types) { g_assert_not_reached (); return 0; } guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj, gboolean create_methodspec, gboolean register_token) { g_assert_not_reached (); return 0; } void mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj) { } void mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods, MonoArray *ctors, MonoArray *fields, MonoArray *properties, MonoArray *events) { g_assert_not_reached (); } void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides) { *overrides = NULL; *num_overrides = 0; } MonoReflectionEvent * mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb) { g_assert_not_reached (); return NULL; } MonoReflectionType* mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb) { g_assert_not_reached (); return NULL; } void mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam) { g_assert_not_reached (); } MonoArray * mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig) { g_assert_not_reached (); return NULL; } MonoArray * mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig) { g_assert_not_reached (); return NULL; } void mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb) { } gpointer mono_reflection_lookup_dynamic_token (MonoImage *image, 
guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context) { return NULL; } MonoType* mono_reflection_type_get_handle (MonoReflectionType* ref) { if (!ref) return NULL; return ref->type; } #endif /* DISABLE_REFLECTION_EMIT */ /* SECURITY_ACTION_* are defined in mono/metadata/tabledefs.h */ const static guint32 declsec_flags_map[] = { 0x00000000, /* empty */ MONO_DECLSEC_FLAG_REQUEST, /* SECURITY_ACTION_REQUEST (x01) */ MONO_DECLSEC_FLAG_DEMAND, /* SECURITY_ACTION_DEMAND (x02) */ MONO_DECLSEC_FLAG_ASSERT, /* SECURITY_ACTION_ASSERT (x03) */ MONO_DECLSEC_FLAG_DENY, /* SECURITY_ACTION_DENY (x04) */ MONO_DECLSEC_FLAG_PERMITONLY, /* SECURITY_ACTION_PERMITONLY (x05) */ MONO_DECLSEC_FLAG_LINKDEMAND, /* SECURITY_ACTION_LINKDEMAND (x06) */ MONO_DECLSEC_FLAG_INHERITANCEDEMAND, /* SECURITY_ACTION_INHERITANCEDEMAND (x07) */ MONO_DECLSEC_FLAG_REQUEST_MINIMUM, /* SECURITY_ACTION_REQUEST_MINIMUM (x08) */ MONO_DECLSEC_FLAG_REQUEST_OPTIONAL, /* SECURITY_ACTION_REQUEST_OPTIONAL (x09) */ MONO_DECLSEC_FLAG_REQUEST_REFUSE, /* SECURITY_ACTION_REQUEST_REFUSE (x0A) */ MONO_DECLSEC_FLAG_PREJIT_GRANT, /* SECURITY_ACTION_PREJIT_GRANT (x0B) */ MONO_DECLSEC_FLAG_PREJIT_DENY, /* SECURITY_ACTION_PREJIT_DENY (x0C) */ MONO_DECLSEC_FLAG_NONCAS_DEMAND, /* SECURITY_ACTION_NONCAS_DEMAND (x0D) */ MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND, /* SECURITY_ACTION_NONCAS_LINKDEMAND (x0E) */ MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND, /* SECURITY_ACTION_NONCAS_INHERITANCEDEMAND (x0F) */ MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE, /* SECURITY_ACTION_LINKDEMAND_CHOICE (x10) */ MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE, /* SECURITY_ACTION_INHERITANCEDEMAND_CHOICE (x11) */ MONO_DECLSEC_FLAG_DEMAND_CHOICE, /* SECURITY_ACTION_DEMAND_CHOICE (x12) */ }; /* * Returns flags that includes all available security action associated to the handle. * @token: metadata token (either for a class or a method) * @image: image where resides the metadata. */ static guint32 mono_declsec_get_flags (MonoImage *image, guint32 token) { int index = mono_metadata_declsec_from_index (image, token); MonoTableInfo *t = &image->tables [MONO_TABLE_DECLSECURITY]; guint32 result = 0; guint32 action; int i; /* HasSecurity can be present for other, not specially encoded, attributes, e.g. SuppressUnmanagedCodeSecurityAttribute */ if (index < 0) return 0; for (i = index; i < t->rows; i++) { guint32 cols [MONO_DECL_SECURITY_SIZE]; mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE); if (cols [MONO_DECL_SECURITY_PARENT] != token) break; action = cols [MONO_DECL_SECURITY_ACTION]; if ((action >= MONO_DECLSEC_ACTION_MIN) && (action <= MONO_DECLSEC_ACTION_MAX)) { result |= declsec_flags_map [action]; } else { g_assert_not_reached (); } } return result; } /* * Get the security actions (in the form of flags) associated with the specified method. * * @method: The method for which we want the declarative security flags. * Return the declarative security flags for the method (only). 
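 *
 * The returned value is a bitmask built from declsec_flags_map above, so a
 * caller can test for individual actions directly; a minimal sketch:
 *
 *     guint32 flags = mono_declsec_flags_from_method (method);
 *     if (flags & MONO_DECLSEC_FLAG_LINKDEMAND)
 *             ...;   (the method carries a LinkDemand to honour at call sites)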
* * Note: To keep MonoMethod size down we do not cache the declarative security flags * (except for the stack modifiers which are kept in the MonoJitInfo structure) */ guint32 mono_declsec_flags_from_method (MonoMethod *method) { if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { /* FIXME: No cache (for the moment) */ guint32 idx = mono_method_get_index (method); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_METHODDEF; return mono_declsec_get_flags (method->klass->image, idx); } return 0; } /* * Get the security actions (in the form of flags) associated with the specified class. * * @klass: The class for which we want the declarative security flags. * Return the declarative security flags for the class. * * Note: We cache the flags inside the MonoClass structure as this will get * called very often (at least for each method). */ guint32 mono_declsec_flags_from_class (MonoClass *klass) { if (klass->flags & TYPE_ATTRIBUTE_HAS_SECURITY) { if (!klass->ext || !klass->ext->declsec_flags) { guint32 idx; idx = mono_metadata_token_index (klass->type_token); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_TYPEDEF; mono_loader_lock (); mono_class_alloc_ext (klass); mono_loader_unlock (); /* we cache the flags on classes */ klass->ext->declsec_flags = mono_declsec_get_flags (klass->image, idx); } return klass->ext->declsec_flags; } return 0; } /* * Get the security actions (in the form of flags) associated with the specified assembly. * * @assembly: The assembly for which we want the declarative security flags. * Return the declarative security flags for the assembly. */ guint32 mono_declsec_flags_from_assembly (MonoAssembly *assembly) { guint32 idx = 1; /* there is only one assembly */ idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY; return mono_declsec_get_flags (assembly->image, idx); } /* * Fill actions for the specific index (which may either be an encoded class token or * an encoded method token) from the metadata image. * Returns TRUE if some actions requiring code generation are present, FALSE otherwise. 
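 *
 * Worked example of the encoded token the callers above construct (assuming,
 * per the ECMA-335 HasDeclSecurity coded index, a 2-bit tag with
 * TypeDef = 0, MethodDef = 1, Assembly = 2): for the method at table index 5,
 *
 *     idx = (5 << MONO_HAS_DECL_SECURITY_BITS) | MONO_HAS_DECL_SECURITY_METHODDEF
 *         = (5 << 2) | 1 = 21
 *
 * and it is this value that gets compared against MONO_DECL_SECURITY_PARENT
 * in the loop below.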
*/ static MonoBoolean fill_actions_from_index (MonoImage *image, guint32 token, MonoDeclSecurityActions* actions, guint32 id_std, guint32 id_noncas, guint32 id_choice) { MonoBoolean result = FALSE; MonoTableInfo *t; guint32 cols [MONO_DECL_SECURITY_SIZE]; int index = mono_metadata_declsec_from_index (image, token); int i; t = &image->tables [MONO_TABLE_DECLSECURITY]; for (i = index; i < t->rows; i++) { mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE); if (cols [MONO_DECL_SECURITY_PARENT] != token) return result; /* if present only replace (class) permissions with method permissions */ /* if empty accept either class or method permissions */ if (cols [MONO_DECL_SECURITY_ACTION] == id_std) { if (!actions->demand.blob) { const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]); actions->demand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET]; actions->demand.blob = (char*) (blob + 2); actions->demand.size = mono_metadata_decode_blob_size (blob, &blob); result = TRUE; } } else if (cols [MONO_DECL_SECURITY_ACTION] == id_noncas) { if (!actions->noncasdemand.blob) { const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]); actions->noncasdemand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET]; actions->noncasdemand.blob = (char*) (blob + 2); actions->noncasdemand.size = mono_metadata_decode_blob_size (blob, &blob); result = TRUE; } } else if (cols [MONO_DECL_SECURITY_ACTION] == id_choice) { if (!actions->demandchoice.blob) { const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]); actions->demandchoice.index = cols [MONO_DECL_SECURITY_PERMISSIONSET]; actions->demandchoice.blob = (char*) (blob + 2); actions->demandchoice.size = mono_metadata_decode_blob_size (blob, &blob); result = TRUE; } } } return result; } static MonoBoolean mono_declsec_get_class_demands_params (MonoClass *klass, MonoDeclSecurityActions* demands, guint32 id_std, guint32 id_noncas, guint32 id_choice) { guint32 idx = mono_metadata_token_index (klass->type_token); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_TYPEDEF; return fill_actions_from_index (klass->image, idx, demands, id_std, id_noncas, id_choice); } static MonoBoolean mono_declsec_get_method_demands_params (MonoMethod *method, MonoDeclSecurityActions* demands, guint32 id_std, guint32 id_noncas, guint32 id_choice) { guint32 idx = mono_method_get_index (method); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_METHODDEF; return fill_actions_from_index (method->klass->image, idx, demands, id_std, id_noncas, id_choice); } /* * Collect all actions (that requires to generate code in mini) assigned for * the specified method. * Note: Don't use the content of actions if the function return FALSE. 
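 *
 * Minimal usage sketch (illustrative only, error handling omitted):
 *
 *     MonoDeclSecurityActions demands;
 *     if (mono_declsec_get_demands (method, &demands))
 *             generate_demand_checks (cfg, &demands);   (hypothetical JIT hook)
 *
 * When it returns TRUE, each of demands.demand, demands.noncasdemand and
 * demands.demandchoice carries a permission-set blob pointer and size if the
 * corresponding action was present, and a NULL blob otherwise.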
*/ MonoBoolean mono_declsec_get_demands (MonoMethod *method, MonoDeclSecurityActions* demands) { guint32 mask = MONO_DECLSEC_FLAG_DEMAND | MONO_DECLSEC_FLAG_NONCAS_DEMAND | MONO_DECLSEC_FLAG_DEMAND_CHOICE; MonoBoolean result = FALSE; guint32 flags; /* quick exit if no declarative security is present in the metadata */ if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows) return FALSE; /* we want the original as the wrapper is "free" of the security informations */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) { method = mono_marshal_method_from_wrapper (method); if (!method) return FALSE; } /* First we look for method-level attributes */ if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { mono_class_init (method->klass); memset (demands, 0, sizeof (MonoDeclSecurityActions)); result = mono_declsec_get_method_demands_params (method, demands, SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE); } /* Here we use (or create) the class declarative cache to look for demands */ flags = mono_declsec_flags_from_class (method->klass); if (flags & mask) { if (!result) { mono_class_init (method->klass); memset (demands, 0, sizeof (MonoDeclSecurityActions)); } result |= mono_declsec_get_class_demands_params (method->klass, demands, SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE); } /* The boolean return value is used as a shortcut in case nothing needs to be generated (e.g. LinkDemand[Choice] and InheritanceDemand[Choice]) */ return result; } /* * Collect all Link actions: LinkDemand, NonCasLinkDemand and LinkDemandChoice (2.0). * * Note: Don't use the content of actions if the function return FALSE. */ MonoBoolean mono_declsec_get_linkdemands (MonoMethod *method, MonoDeclSecurityActions* klass, MonoDeclSecurityActions *cmethod) { MonoBoolean result = FALSE; guint32 flags; /* quick exit if no declarative security is present in the metadata */ if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows) return FALSE; /* we want the original as the wrapper is "free" of the security informations */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) { method = mono_marshal_method_from_wrapper (method); if (!method) return FALSE; } /* results are independant - zeroize both */ memset (cmethod, 0, sizeof (MonoDeclSecurityActions)); memset (klass, 0, sizeof (MonoDeclSecurityActions)); /* First we look for method-level attributes */ if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { mono_class_init (method->klass); result = mono_declsec_get_method_demands_params (method, cmethod, SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE); } /* Here we use (or create) the class declarative cache to look for demands */ flags = mono_declsec_flags_from_class (method->klass); if (flags & (MONO_DECLSEC_FLAG_LINKDEMAND | MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND | MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE)) { mono_class_init (method->klass); result |= mono_declsec_get_class_demands_params (method->klass, klass, SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE); } return result; } /* * Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0). 
* * @klass The inherited class - this is the class that provides the security check (attributes) * @demans * return TRUE if inheritance demands (any kind) are present, FALSE otherwise. * * Note: Don't use the content of actions if the function return FALSE. */ MonoBoolean mono_declsec_get_inheritdemands_class (MonoClass *klass, MonoDeclSecurityActions* demands) { MonoBoolean result = FALSE; guint32 flags; /* quick exit if no declarative security is present in the metadata */ if (!klass->image->tables [MONO_TABLE_DECLSECURITY].rows) return FALSE; /* Here we use (or create) the class declarative cache to look for demands */ flags = mono_declsec_flags_from_class (klass); if (flags & (MONO_DECLSEC_FLAG_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE)) { mono_class_init (klass); memset (demands, 0, sizeof (MonoDeclSecurityActions)); result |= mono_declsec_get_class_demands_params (klass, demands, SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE); } return result; } /* * Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0). * * Note: Don't use the content of actions if the function return FALSE. */ MonoBoolean mono_declsec_get_inheritdemands_method (MonoMethod *method, MonoDeclSecurityActions* demands) { /* quick exit if no declarative security is present in the metadata */ if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows) return FALSE; /* we want the original as the wrapper is "free" of the security informations */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) { method = mono_marshal_method_from_wrapper (method); if (!method) return FALSE; } if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { mono_class_init (method->klass); memset (demands, 0, sizeof (MonoDeclSecurityActions)); return mono_declsec_get_method_demands_params (method, demands, SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE); } return FALSE; } static MonoBoolean get_declsec_action (MonoImage *image, guint32 token, guint32 action, MonoDeclSecurityEntry *entry) { guint32 cols [MONO_DECL_SECURITY_SIZE]; MonoTableInfo *t; int i; int index = mono_metadata_declsec_from_index (image, token); if (index == -1) return FALSE; t = &image->tables [MONO_TABLE_DECLSECURITY]; for (i = index; i < t->rows; i++) { mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE); /* shortcut - index are ordered */ if (token != cols [MONO_DECL_SECURITY_PARENT]) return FALSE; if (cols [MONO_DECL_SECURITY_ACTION] == action) { const char *metadata = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]); entry->blob = (char*) (metadata + 2); entry->size = mono_metadata_decode_blob_size (metadata, &metadata); return TRUE; } } return FALSE; } MonoBoolean mono_declsec_get_method_action (MonoMethod *method, guint32 action, MonoDeclSecurityEntry *entry) { if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { guint32 idx = mono_method_get_index (method); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_METHODDEF; return get_declsec_action (method->klass->image, idx, action, entry); } return FALSE; } MonoBoolean mono_declsec_get_class_action (MonoClass *klass, guint32 action, MonoDeclSecurityEntry *entry) { /* use cache */ guint32 flags = mono_declsec_flags_from_class (klass); if (declsec_flags_map [action] & flags) { 
guint32 idx = mono_metadata_token_index (klass->type_token); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_TYPEDEF; return get_declsec_action (klass->image, idx, action, entry); } return FALSE; } MonoBoolean mono_declsec_get_assembly_action (MonoAssembly *assembly, guint32 action, MonoDeclSecurityEntry *entry) { guint32 idx = 1; /* there is only one assembly */ idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY; return get_declsec_action (assembly->image, idx, action, entry); } gboolean mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass) { MonoObject *res, *exc; void *params [1]; static MonoClass *System_Reflection_Emit_TypeBuilder = NULL; static MonoMethod *method = NULL; if (!System_Reflection_Emit_TypeBuilder) { System_Reflection_Emit_TypeBuilder = mono_class_from_name (mono_defaults.corlib, "System.Reflection.Emit", "TypeBuilder"); g_assert (System_Reflection_Emit_TypeBuilder); } if (method == NULL) { method = mono_class_get_method_from_name (System_Reflection_Emit_TypeBuilder, "IsAssignableTo", 1); g_assert (method); } /* * The result of mono_type_get_object () might be a System.MonoType but we * need a TypeBuilder so use klass->reflection_info. */ g_assert (klass->reflection_info); g_assert (!strcmp (((MonoObject*)(klass->reflection_info))->vtable->klass->name, "TypeBuilder")); params [0] = mono_type_get_object (mono_domain_get (), &oklass->byval_arg); res = mono_runtime_invoke (method, (MonoObject*)(klass->reflection_info), params, &exc); if (exc) return FALSE; else return *(MonoBoolean*)mono_object_unbox (res); }
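/*
 * Illustrative aside, not part of the original file: get_declsec_action ()
 * above skips a blob length header with mono_metadata_decode_blob_size ().
 * The sketch below re-derives that header format from ECMA-335 II.23.2
 * (compressed unsigned integers in the blob heap) purely for explanation;
 * the real metadata helper should always be used instead.  It only assumes
 * the glib-style guint32 typedef that this file already relies on.
 */
static guint32
example_decode_compressed_uint (const unsigned char *p, int *header_len)
{
	if ((p [0] & 0x80) == 0) {		/* 0xxxxxxx: 1-byte header */
		*header_len = 1;
		return p [0];
	}
	if ((p [0] & 0xC0) == 0x80) {		/* 10xxxxxx: 2-byte header */
		*header_len = 2;
		return ((guint32)(p [0] & 0x3F) << 8) | p [1];
	}
	/* 110xxxxx: 4-byte header */
	*header_len = 4;
	return ((guint32)(p [0] & 0x1F) << 24) | ((guint32)p [1] << 16) |
		((guint32)p [2] << 8) | p [3];
}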
./CrossVul/dataset_final_sorted/CWE-399/c/good_3431_3
crossvul-cpp_data_bad_1493_0
/* crypto/bn/bn_gf2m.c */ /* ==================================================================== * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. * * The Elliptic Curve Public-Key Crypto Library (ECC Code) included * herein is developed by SUN MICROSYSTEMS, INC., and is contributed * to the OpenSSL project. * * The ECC Code is licensed pursuant to the OpenSSL open source * license provided below. * * In addition, Sun covenants to all licensees who provide a reciprocal * covenant with respect to their own patents if any, not to sue under * current and future patent claims necessarily infringed by the making, * using, practicing, selling, offering for sale and/or otherwise * disposing of the ECC Code as delivered hereunder (or portions thereof), * provided that such covenant shall not apply: * 1) for code that a licensee deletes from the ECC Code; * 2) separates from the ECC Code; or * 3) for infringements caused by: * i) the modification of the ECC Code or * ii) the combination of the ECC Code with other software or * devices where such combination causes the infringement. * * The software is originally written by Sheueling Chang Shantz and * Douglas Stebila of Sun Microsystems Laboratories. * */ /* * NOTE: This file is licensed pursuant to the OpenSSL license below and may * be modified; but after modifications, the above covenant may no longer * apply! In such cases, the corresponding paragraph ["In addition, Sun * covenants ... causes the infringement."] and this note can be edited out; * but please keep the Sun copyright notice and attribution. */ /* ==================================================================== * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ #include <assert.h> #include <limits.h> #include <stdio.h> #include "internal/cryptlib.h" #include "bn_lcl.h" #ifndef OPENSSL_NO_EC2M /* * Maximum number of iterations before BN_GF2m_mod_solve_quad_arr should * fail. */ # define MAX_ITERATIONS 50 static const BN_ULONG SQR_tb[16] = { 0, 1, 4, 5, 16, 17, 20, 21, 64, 65, 68, 69, 80, 81, 84, 85 }; /* Platform-specific macros to accelerate squaring. */ # if defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG) # define SQR1(w) \ SQR_tb[(w) >> 60 & 0xF] << 56 | SQR_tb[(w) >> 56 & 0xF] << 48 | \ SQR_tb[(w) >> 52 & 0xF] << 40 | SQR_tb[(w) >> 48 & 0xF] << 32 | \ SQR_tb[(w) >> 44 & 0xF] << 24 | SQR_tb[(w) >> 40 & 0xF] << 16 | \ SQR_tb[(w) >> 36 & 0xF] << 8 | SQR_tb[(w) >> 32 & 0xF] # define SQR0(w) \ SQR_tb[(w) >> 28 & 0xF] << 56 | SQR_tb[(w) >> 24 & 0xF] << 48 | \ SQR_tb[(w) >> 20 & 0xF] << 40 | SQR_tb[(w) >> 16 & 0xF] << 32 | \ SQR_tb[(w) >> 12 & 0xF] << 24 | SQR_tb[(w) >> 8 & 0xF] << 16 | \ SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF] # endif # ifdef THIRTY_TWO_BIT # define SQR1(w) \ SQR_tb[(w) >> 28 & 0xF] << 24 | SQR_tb[(w) >> 24 & 0xF] << 16 | \ SQR_tb[(w) >> 20 & 0xF] << 8 | SQR_tb[(w) >> 16 & 0xF] # define SQR0(w) \ SQR_tb[(w) >> 12 & 0xF] << 24 | SQR_tb[(w) >> 8 & 0xF] << 16 | \ SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF] # endif # if !defined(OPENSSL_BN_ASM_GF2m) /* * Product of two polynomials a, b each with degree < BN_BITS2 - 1, result is * a polynomial r with degree < 2 * BN_BITS - 1 The caller MUST ensure that * the variables have the right amount of space allocated. 
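 *
 * Carry-less arithmetic in GF(2)[x]: addition is XOR and multiplication is a
 * XOR of shifted copies, e.g. (x^2 + 1) * (x + 1) = x^3 + x^2 + x + 1, i.e.
 * 0b101 "clmul" 0b011 = 0b1111.  The implementations below precompute a small
 * table of multiples of a (8 entries / 3-bit windows on 32-bit builds,
 * 16 entries / 4-bit windows on 64-bit builds) and then scan b window by
 * window, like a windowed schoolbook multiplication without carries.  The
 * same idea explains SQR_tb above: squaring only spreads the bits apart,
 * e.g. SQR_tb[0xB] = 69 = 0b1000101, which is 0b1011 with a zero interleaved
 * between every pair of bits ((x^3 + x + 1)^2 = x^6 + x^2 + 1).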
*/ # ifdef THIRTY_TWO_BIT static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b) { register BN_ULONG h, l, s; BN_ULONG tab[8], top2b = a >> 30; register BN_ULONG a1, a2, a4; a1 = a & (0x3FFFFFFF); a2 = a1 << 1; a4 = a2 << 1; tab[0] = 0; tab[1] = a1; tab[2] = a2; tab[3] = a1 ^ a2; tab[4] = a4; tab[5] = a1 ^ a4; tab[6] = a2 ^ a4; tab[7] = a1 ^ a2 ^ a4; s = tab[b & 0x7]; l = s; s = tab[b >> 3 & 0x7]; l ^= s << 3; h = s >> 29; s = tab[b >> 6 & 0x7]; l ^= s << 6; h ^= s >> 26; s = tab[b >> 9 & 0x7]; l ^= s << 9; h ^= s >> 23; s = tab[b >> 12 & 0x7]; l ^= s << 12; h ^= s >> 20; s = tab[b >> 15 & 0x7]; l ^= s << 15; h ^= s >> 17; s = tab[b >> 18 & 0x7]; l ^= s << 18; h ^= s >> 14; s = tab[b >> 21 & 0x7]; l ^= s << 21; h ^= s >> 11; s = tab[b >> 24 & 0x7]; l ^= s << 24; h ^= s >> 8; s = tab[b >> 27 & 0x7]; l ^= s << 27; h ^= s >> 5; s = tab[b >> 30]; l ^= s << 30; h ^= s >> 2; /* compensate for the top two bits of a */ if (top2b & 01) { l ^= b << 30; h ^= b >> 2; } if (top2b & 02) { l ^= b << 31; h ^= b >> 1; } *r1 = h; *r0 = l; } # endif # if defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG) static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b) { register BN_ULONG h, l, s; BN_ULONG tab[16], top3b = a >> 61; register BN_ULONG a1, a2, a4, a8; a1 = a & (0x1FFFFFFFFFFFFFFFULL); a2 = a1 << 1; a4 = a2 << 1; a8 = a4 << 1; tab[0] = 0; tab[1] = a1; tab[2] = a2; tab[3] = a1 ^ a2; tab[4] = a4; tab[5] = a1 ^ a4; tab[6] = a2 ^ a4; tab[7] = a1 ^ a2 ^ a4; tab[8] = a8; tab[9] = a1 ^ a8; tab[10] = a2 ^ a8; tab[11] = a1 ^ a2 ^ a8; tab[12] = a4 ^ a8; tab[13] = a1 ^ a4 ^ a8; tab[14] = a2 ^ a4 ^ a8; tab[15] = a1 ^ a2 ^ a4 ^ a8; s = tab[b & 0xF]; l = s; s = tab[b >> 4 & 0xF]; l ^= s << 4; h = s >> 60; s = tab[b >> 8 & 0xF]; l ^= s << 8; h ^= s >> 56; s = tab[b >> 12 & 0xF]; l ^= s << 12; h ^= s >> 52; s = tab[b >> 16 & 0xF]; l ^= s << 16; h ^= s >> 48; s = tab[b >> 20 & 0xF]; l ^= s << 20; h ^= s >> 44; s = tab[b >> 24 & 0xF]; l ^= s << 24; h ^= s >> 40; s = tab[b >> 28 & 0xF]; l ^= s << 28; h ^= s >> 36; s = tab[b >> 32 & 0xF]; l ^= s << 32; h ^= s >> 32; s = tab[b >> 36 & 0xF]; l ^= s << 36; h ^= s >> 28; s = tab[b >> 40 & 0xF]; l ^= s << 40; h ^= s >> 24; s = tab[b >> 44 & 0xF]; l ^= s << 44; h ^= s >> 20; s = tab[b >> 48 & 0xF]; l ^= s << 48; h ^= s >> 16; s = tab[b >> 52 & 0xF]; l ^= s << 52; h ^= s >> 12; s = tab[b >> 56 & 0xF]; l ^= s << 56; h ^= s >> 8; s = tab[b >> 60]; l ^= s << 60; h ^= s >> 4; /* compensate for the top three bits of a */ if (top3b & 01) { l ^= b << 61; h ^= b >> 3; } if (top3b & 02) { l ^= b << 62; h ^= b >> 2; } if (top3b & 04) { l ^= b << 63; h ^= b >> 1; } *r1 = h; *r0 = l; } # endif /* * Product of two polynomials a, b each with degree < 2 * BN_BITS2 - 1, * result is a polynomial r with degree < 4 * BN_BITS2 - 1 The caller MUST * ensure that the variables have the right amount of space allocated. 
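 *
 * The implementation is one Karatsuba step over GF(2): writing
 * A = a1*X + a0 and B = b1*X + b0 with X = x^BN_BITS2,
 *
 *     A*B = (a1*b1)*X^2 + ((a0^a1)*(b0^b1) ^ a1*b1 ^ a0*b0)*X + a0*b0
 *
 * so three 1x1 products suffice; the XORs after the three bn_GF2m_mul_1x1
 * calls are exactly the middle-term correction (addition is XOR, so the
 * usual Karatsuba subtraction disappears).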
*/ static void bn_GF2m_mul_2x2(BN_ULONG *r, const BN_ULONG a1, const BN_ULONG a0, const BN_ULONG b1, const BN_ULONG b0) { BN_ULONG m1, m0; /* r[3] = h1, r[2] = h0; r[1] = l1; r[0] = l0 */ bn_GF2m_mul_1x1(r + 3, r + 2, a1, b1); bn_GF2m_mul_1x1(r + 1, r, a0, b0); bn_GF2m_mul_1x1(&m1, &m0, a0 ^ a1, b0 ^ b1); /* Correction on m1 ^= l1 ^ h1; m0 ^= l0 ^ h0; */ r[2] ^= m1 ^ r[1] ^ r[3]; /* h0 ^= m1 ^ l1 ^ h1; */ r[1] = r[3] ^ r[2] ^ r[0] ^ m1 ^ m0; /* l1 ^= l0 ^ h0 ^ m0; */ } # else void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0); # endif /* * Add polynomials a and b and store result in r; r could be a or b, a and b * could be equal; r is the bitwise XOR of a and b. */ int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { int i; const BIGNUM *at, *bt; bn_check_top(a); bn_check_top(b); if (a->top < b->top) { at = b; bt = a; } else { at = a; bt = b; } if (bn_wexpand(r, at->top) == NULL) return 0; for (i = 0; i < bt->top; i++) { r->d[i] = at->d[i] ^ bt->d[i]; } for (; i < at->top; i++) { r->d[i] = at->d[i]; } r->top = at->top; bn_correct_top(r); return 1; } /*- * Some functions allow for representation of the irreducible polynomials * as an int[], say p. The irreducible f(t) is then of the form: * t^p[0] + t^p[1] + ... + t^p[k] * where m = p[0] > p[1] > ... > p[k] = 0. */ /* Performs modular reduction of a and store result in r. r could be a. */ int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]) { int j, k; int n, dN, d0, d1; BN_ULONG zz, *z; bn_check_top(a); if (!p[0]) { /* reduction mod 1 => return 0 */ BN_zero(r); return 1; } /* * Since the algorithm does reduction in the r value, if a != r, copy the * contents of a into r so we can do reduction in r. */ if (a != r) { if (!bn_wexpand(r, a->top)) return 0; for (j = 0; j < a->top; j++) { r->d[j] = a->d[j]; } r->top = a->top; } z = r->d; /* start reduction */ dN = p[0] / BN_BITS2; for (j = r->top - 1; j > dN;) { zz = z[j]; if (z[j] == 0) { j--; continue; } z[j] = 0; for (k = 1; p[k] != 0; k++) { /* reducing component t^p[k] */ n = p[0] - p[k]; d0 = n % BN_BITS2; d1 = BN_BITS2 - d0; n /= BN_BITS2; z[j - n] ^= (zz >> d0); if (d0) z[j - n - 1] ^= (zz << d1); } /* reducing component t^0 */ n = dN; d0 = p[0] % BN_BITS2; d1 = BN_BITS2 - d0; z[j - n] ^= (zz >> d0); if (d0) z[j - n - 1] ^= (zz << d1); } /* final round of reduction */ while (j == dN) { d0 = p[0] % BN_BITS2; zz = z[dN] >> d0; if (zz == 0) break; d1 = BN_BITS2 - d0; /* clear up the top d1 bits */ if (d0) z[dN] = (z[dN] << d1) >> d1; else z[dN] = 0; z[0] ^= zz; /* reduction t^0 component */ for (k = 1; p[k] != 0; k++) { BN_ULONG tmp_ulong; /* reducing component t^p[k] */ n = p[k] / BN_BITS2; d0 = p[k] % BN_BITS2; d1 = BN_BITS2 - d0; z[n] ^= (zz << d0); if (d0 && (tmp_ulong = zz >> d1)) z[n + 1] ^= tmp_ulong; } } bn_correct_top(r); return 1; } /* * Performs modular reduction of a by p and store result in r. r could be a. * This function calls down to the BN_GF2m_mod_arr implementation; this wrapper * function is only provided for convenience; for best performance, use the * BN_GF2m_mod_arr function. */ int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p) { int ret = 0; int arr[6]; bn_check_top(a); bn_check_top(p); ret = BN_GF2m_poly2arr(p, arr, OSSL_NELEM(arr)); if (!ret || ret > (int)OSSL_NELEM(arr)) { BNerr(BN_F_BN_GF2M_MOD, BN_R_INVALID_LENGTH); return 0; } ret = BN_GF2m_mod_arr(r, a, arr); bn_check_top(r); return ret; } /* * Compute the product of two polynomials a and b, reduce modulo p, and store * the result in r. 
r could be a or b; a could be b. */ int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx) { int zlen, i, j, k, ret = 0; BIGNUM *s; BN_ULONG x1, x0, y1, y0, zz[4]; bn_check_top(a); bn_check_top(b); if (a == b) { return BN_GF2m_mod_sqr_arr(r, a, p, ctx); } BN_CTX_start(ctx); if ((s = BN_CTX_get(ctx)) == NULL) goto err; zlen = a->top + b->top + 4; if (!bn_wexpand(s, zlen)) goto err; s->top = zlen; for (i = 0; i < zlen; i++) s->d[i] = 0; for (j = 0; j < b->top; j += 2) { y0 = b->d[j]; y1 = ((j + 1) == b->top) ? 0 : b->d[j + 1]; for (i = 0; i < a->top; i += 2) { x0 = a->d[i]; x1 = ((i + 1) == a->top) ? 0 : a->d[i + 1]; bn_GF2m_mul_2x2(zz, x1, x0, y1, y0); for (k = 0; k < 4; k++) s->d[i + j + k] ^= zz[k]; } } bn_correct_top(s); if (BN_GF2m_mod_arr(r, s, p)) ret = 1; bn_check_top(r); err: BN_CTX_end(ctx); return ret; } /* * Compute the product of two polynomials a and b, reduce modulo p, and store * the result in r. r could be a or b; a could equal b. This function calls * down to the BN_GF2m_mod_mul_arr implementation; this wrapper function is * only provided for convenience; for best performance, use the * BN_GF2m_mod_mul_arr function. */ int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx) { int ret = 0; const int max = BN_num_bits(p) + 1; int *arr = NULL; bn_check_top(a); bn_check_top(b); bn_check_top(p); if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL) goto err; ret = BN_GF2m_poly2arr(p, arr, max); if (!ret || ret > max) { BNerr(BN_F_BN_GF2M_MOD_MUL, BN_R_INVALID_LENGTH); goto err; } ret = BN_GF2m_mod_mul_arr(r, a, b, arr, ctx); bn_check_top(r); err: OPENSSL_free(arr); return ret; } /* Square a, reduce the result mod p, and store it in a. r could be a. */ int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx) { int i, ret = 0; BIGNUM *s; bn_check_top(a); BN_CTX_start(ctx); if ((s = BN_CTX_get(ctx)) == NULL) return 0; if (!bn_wexpand(s, 2 * a->top)) goto err; for (i = a->top - 1; i >= 0; i--) { s->d[2 * i + 1] = SQR1(a->d[i]); s->d[2 * i] = SQR0(a->d[i]); } s->top = 2 * a->top; bn_correct_top(s); if (!BN_GF2m_mod_arr(r, s, p)) goto err; bn_check_top(r); ret = 1; err: BN_CTX_end(ctx); return ret; } /* * Square a, reduce the result mod p, and store it in a. r could be a. This * function calls down to the BN_GF2m_mod_sqr_arr implementation; this * wrapper function is only provided for convenience; for best performance, * use the BN_GF2m_mod_sqr_arr function. */ int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { int ret = 0; const int max = BN_num_bits(p) + 1; int *arr = NULL; bn_check_top(a); bn_check_top(p); if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL) goto err; ret = BN_GF2m_poly2arr(p, arr, max); if (!ret || ret > max) { BNerr(BN_F_BN_GF2M_MOD_SQR, BN_R_INVALID_LENGTH); goto err; } ret = BN_GF2m_mod_sqr_arr(r, a, arr, ctx); bn_check_top(r); err: OPENSSL_free(arr); return ret; } /* * Invert a, reduce modulo p, and store the result in r. r could be a. Uses * Modified Almost Inverse Algorithm (Algorithm 10) from Hankerson, D., * Hernandez, J.L., and Menezes, A. "Software Implementation of Elliptic * Curve Cryptography Over Binary Fields". 
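 *
 * (Roughly, the loop below keeps the invariants b*a == u and c*a == v
 * (mod p), starting from u = a mod p, v = p, b = 1, c = 0; once u is
 * reduced to 1, b holds the inverse of a.)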
*/ int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp; int ret = 0; bn_check_top(a); bn_check_top(p); BN_CTX_start(ctx); if ((b = BN_CTX_get(ctx)) == NULL) goto err; if ((c = BN_CTX_get(ctx)) == NULL) goto err; if ((u = BN_CTX_get(ctx)) == NULL) goto err; if ((v = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_GF2m_mod(u, a, p)) goto err; if (BN_is_zero(u)) goto err; if (!BN_copy(v, p)) goto err; # if 0 if (!BN_one(b)) goto err; while (1) { while (!BN_is_odd(u)) { if (BN_is_zero(u)) goto err; if (!BN_rshift1(u, u)) goto err; if (BN_is_odd(b)) { if (!BN_GF2m_add(b, b, p)) goto err; } if (!BN_rshift1(b, b)) goto err; } if (BN_abs_is_word(u, 1)) break; if (BN_num_bits(u) < BN_num_bits(v)) { tmp = u; u = v; v = tmp; tmp = b; b = c; c = tmp; } if (!BN_GF2m_add(u, u, v)) goto err; if (!BN_GF2m_add(b, b, c)) goto err; } # else { int i, ubits = BN_num_bits(u), vbits = BN_num_bits(v), /* v is copy * of p */ top = p->top; BN_ULONG *udp, *bdp, *vdp, *cdp; bn_wexpand(u, top); udp = u->d; for (i = u->top; i < top; i++) udp[i] = 0; u->top = top; bn_wexpand(b, top); bdp = b->d; bdp[0] = 1; for (i = 1; i < top; i++) bdp[i] = 0; b->top = top; bn_wexpand(c, top); cdp = c->d; for (i = 0; i < top; i++) cdp[i] = 0; c->top = top; vdp = v->d; /* It pays off to "cache" *->d pointers, * because it allows optimizer to be more * aggressive. But we don't have to "cache" * p->d, because *p is declared 'const'... */ while (1) { while (ubits && !(udp[0] & 1)) { BN_ULONG u0, u1, b0, b1, mask; u0 = udp[0]; b0 = bdp[0]; mask = (BN_ULONG)0 - (b0 & 1); b0 ^= p->d[0] & mask; for (i = 0; i < top - 1; i++) { u1 = udp[i + 1]; udp[i] = ((u0 >> 1) | (u1 << (BN_BITS2 - 1))) & BN_MASK2; u0 = u1; b1 = bdp[i + 1] ^ (p->d[i + 1] & mask); bdp[i] = ((b0 >> 1) | (b1 << (BN_BITS2 - 1))) & BN_MASK2; b0 = b1; } udp[i] = u0 >> 1; bdp[i] = b0 >> 1; ubits--; } if (ubits <= BN_BITS2 && udp[0] == 1) break; if (ubits < vbits) { i = ubits; ubits = vbits; vbits = i; tmp = u; u = v; v = tmp; tmp = b; b = c; c = tmp; udp = vdp; vdp = v->d; bdp = cdp; cdp = c->d; } for (i = 0; i < top; i++) { udp[i] ^= vdp[i]; bdp[i] ^= cdp[i]; } if (ubits == vbits) { BN_ULONG ul; int utop = (ubits - 1) / BN_BITS2; while ((ul = udp[utop]) == 0 && utop) utop--; ubits = utop * BN_BITS2 + BN_num_bits_word(ul); } } bn_correct_top(b); } # endif if (!BN_copy(r, b)) goto err; bn_check_top(r); ret = 1; err: # ifdef BN_DEBUG /* BN_CTX_end would complain about the * expanded form */ bn_correct_top(c); bn_correct_top(u); bn_correct_top(v); # endif BN_CTX_end(ctx); return ret; } /* * Invert xx, reduce modulo p, and store the result in r. r could be xx. * This function calls down to the BN_GF2m_mod_inv implementation; this * wrapper function is only provided for convenience; for best performance, * use the BN_GF2m_mod_inv function. */ int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *xx, const int p[], BN_CTX *ctx) { BIGNUM *field; int ret = 0; bn_check_top(xx); BN_CTX_start(ctx); if ((field = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_GF2m_arr2poly(p, field)) goto err; ret = BN_GF2m_mod_inv(r, xx, field, ctx); bn_check_top(r); err: BN_CTX_end(ctx); return ret; } # ifndef OPENSSL_SUN_GF2M_DIV /* * Divide y by x, reduce modulo p, and store the result in r. r could be x * or y, x could equal y. 
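 * Implemented as r = y * x^(-1) mod p, i.e. BN_GF2m_mod_inv followed by
 * BN_GF2m_mod_mul.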
*/ int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *y, const BIGNUM *x, const BIGNUM *p, BN_CTX *ctx) { BIGNUM *xinv = NULL; int ret = 0; bn_check_top(y); bn_check_top(x); bn_check_top(p); BN_CTX_start(ctx); xinv = BN_CTX_get(ctx); if (xinv == NULL) goto err; if (!BN_GF2m_mod_inv(xinv, x, p, ctx)) goto err; if (!BN_GF2m_mod_mul(r, y, xinv, p, ctx)) goto err; bn_check_top(r); ret = 1; err: BN_CTX_end(ctx); return ret; } # else /* * Divide y by x, reduce modulo p, and store the result in r. r could be x * or y, x could equal y. Uses algorithm Modular_Division_GF(2^m) from * Chang-Shantz, S. "From Euclid's GCD to Montgomery Multiplication to the * Great Divide". */ int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *y, const BIGNUM *x, const BIGNUM *p, BN_CTX *ctx) { BIGNUM *a, *b, *u, *v; int ret = 0; bn_check_top(y); bn_check_top(x); bn_check_top(p); BN_CTX_start(ctx); a = BN_CTX_get(ctx); b = BN_CTX_get(ctx); u = BN_CTX_get(ctx); v = BN_CTX_get(ctx); if (v == NULL) goto err; /* reduce x and y mod p */ if (!BN_GF2m_mod(u, y, p)) goto err; if (!BN_GF2m_mod(a, x, p)) goto err; if (!BN_copy(b, p)) goto err; while (!BN_is_odd(a)) { if (!BN_rshift1(a, a)) goto err; if (BN_is_odd(u)) if (!BN_GF2m_add(u, u, p)) goto err; if (!BN_rshift1(u, u)) goto err; } do { if (BN_GF2m_cmp(b, a) > 0) { if (!BN_GF2m_add(b, b, a)) goto err; if (!BN_GF2m_add(v, v, u)) goto err; do { if (!BN_rshift1(b, b)) goto err; if (BN_is_odd(v)) if (!BN_GF2m_add(v, v, p)) goto err; if (!BN_rshift1(v, v)) goto err; } while (!BN_is_odd(b)); } else if (BN_abs_is_word(a, 1)) break; else { if (!BN_GF2m_add(a, a, b)) goto err; if (!BN_GF2m_add(u, u, v)) goto err; do { if (!BN_rshift1(a, a)) goto err; if (BN_is_odd(u)) if (!BN_GF2m_add(u, u, p)) goto err; if (!BN_rshift1(u, u)) goto err; } while (!BN_is_odd(a)); } } while (1); if (!BN_copy(r, u)) goto err; bn_check_top(r); ret = 1; err: BN_CTX_end(ctx); return ret; } # endif /* * Divide yy by xx, reduce modulo p, and store the result in r. r could be xx * * or yy, xx could equal yy. This function calls down to the * BN_GF2m_mod_div implementation; this wrapper function is only provided for * convenience; for best performance, use the BN_GF2m_mod_div function. */ int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *yy, const BIGNUM *xx, const int p[], BN_CTX *ctx) { BIGNUM *field; int ret = 0; bn_check_top(yy); bn_check_top(xx); BN_CTX_start(ctx); if ((field = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_GF2m_arr2poly(p, field)) goto err; ret = BN_GF2m_mod_div(r, yy, xx, field, ctx); bn_check_top(r); err: BN_CTX_end(ctx); return ret; } /* * Compute the bth power of a, reduce modulo p, and store the result in r. r * could be a. Uses simple square-and-multiply algorithm A.5.1 from IEEE * P1363. */ int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx) { int ret = 0, i, n; BIGNUM *u; bn_check_top(a); bn_check_top(b); if (BN_is_zero(b)) return (BN_one(r)); if (BN_abs_is_word(b, 1)) return (BN_copy(r, a) != NULL); BN_CTX_start(ctx); if ((u = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_GF2m_mod_arr(u, a, p)) goto err; n = BN_num_bits(b) - 1; for (i = n - 1; i >= 0; i--) { if (!BN_GF2m_mod_sqr_arr(u, u, p, ctx)) goto err; if (BN_is_bit_set(b, i)) { if (!BN_GF2m_mod_mul_arr(u, u, a, p, ctx)) goto err; } } if (!BN_copy(r, u)) goto err; bn_check_top(r); ret = 1; err: BN_CTX_end(ctx); return ret; } /* * Compute the bth power of a, reduce modulo p, and store the result in r. r * could be a. 
This function calls down to the BN_GF2m_mod_exp_arr * implementation; this wrapper function is only provided for convenience; * for best performance, use the BN_GF2m_mod_exp_arr function. */ int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx) { int ret = 0; const int max = BN_num_bits(p) + 1; int *arr = NULL; bn_check_top(a); bn_check_top(b); bn_check_top(p); if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL) goto err; ret = BN_GF2m_poly2arr(p, arr, max); if (!ret || ret > max) { BNerr(BN_F_BN_GF2M_MOD_EXP, BN_R_INVALID_LENGTH); goto err; } ret = BN_GF2m_mod_exp_arr(r, a, b, arr, ctx); bn_check_top(r); err: OPENSSL_free(arr); return ret; } /* * Compute the square root of a, reduce modulo p, and store the result in r. * r could be a. Uses exponentiation as in algorithm A.4.1 from IEEE P1363. */ int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx) { int ret = 0; BIGNUM *u; bn_check_top(a); if (!p[0]) { /* reduction mod 1 => return 0 */ BN_zero(r); return 1; } BN_CTX_start(ctx); if ((u = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_set_bit(u, p[0] - 1)) goto err; ret = BN_GF2m_mod_exp_arr(r, a, u, p, ctx); bn_check_top(r); err: BN_CTX_end(ctx); return ret; } /* * Compute the square root of a, reduce modulo p, and store the result in r. * r could be a. This function calls down to the BN_GF2m_mod_sqrt_arr * implementation; this wrapper function is only provided for convenience; * for best performance, use the BN_GF2m_mod_sqrt_arr function. */ int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { int ret = 0; const int max = BN_num_bits(p) + 1; int *arr = NULL; bn_check_top(a); bn_check_top(p); if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL) goto err; ret = BN_GF2m_poly2arr(p, arr, max); if (!ret || ret > max) { BNerr(BN_F_BN_GF2M_MOD_SQRT, BN_R_INVALID_LENGTH); goto err; } ret = BN_GF2m_mod_sqrt_arr(r, a, arr, ctx); bn_check_top(r); err: OPENSSL_free(arr); return ret; } /* * Find r such that r^2 + r = a mod p. r could be a. If no r exists returns * 0. Uses algorithms A.4.7 and A.4.6 from IEEE P1363. 
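 *
 * (For odd m the candidate solution is the half-trace
 * z = a + a^4 + a^16 + ... + a^(4^((m-1)/2)); for even m a random rho is
 * drawn and a trace-style sum is accumulated, retrying up to
 * MAX_ITERATIONS times.)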
*/ int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a_, const int p[], BN_CTX *ctx) { int ret = 0, count = 0, j; BIGNUM *a, *z, *rho, *w, *w2, *tmp; bn_check_top(a_); if (!p[0]) { /* reduction mod 1 => return 0 */ BN_zero(r); return 1; } BN_CTX_start(ctx); a = BN_CTX_get(ctx); z = BN_CTX_get(ctx); w = BN_CTX_get(ctx); if (w == NULL) goto err; if (!BN_GF2m_mod_arr(a, a_, p)) goto err; if (BN_is_zero(a)) { BN_zero(r); ret = 1; goto err; } if (p[0] & 0x1) { /* m is odd */ /* compute half-trace of a */ if (!BN_copy(z, a)) goto err; for (j = 1; j <= (p[0] - 1) / 2; j++) { if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err; if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err; if (!BN_GF2m_add(z, z, a)) goto err; } } else { /* m is even */ rho = BN_CTX_get(ctx); w2 = BN_CTX_get(ctx); tmp = BN_CTX_get(ctx); if (tmp == NULL) goto err; do { if (!BN_rand(rho, p[0], 0, 0)) goto err; if (!BN_GF2m_mod_arr(rho, rho, p)) goto err; BN_zero(z); if (!BN_copy(w, rho)) goto err; for (j = 1; j <= p[0] - 1; j++) { if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err; if (!BN_GF2m_mod_sqr_arr(w2, w, p, ctx)) goto err; if (!BN_GF2m_mod_mul_arr(tmp, w2, a, p, ctx)) goto err; if (!BN_GF2m_add(z, z, tmp)) goto err; if (!BN_GF2m_add(w, w2, rho)) goto err; } count++; } while (BN_is_zero(w) && (count < MAX_ITERATIONS)); if (BN_is_zero(w)) { BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR, BN_R_TOO_MANY_ITERATIONS); goto err; } } if (!BN_GF2m_mod_sqr_arr(w, z, p, ctx)) goto err; if (!BN_GF2m_add(w, z, w)) goto err; if (BN_GF2m_cmp(w, a)) { BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR, BN_R_NO_SOLUTION); goto err; } if (!BN_copy(r, z)) goto err; bn_check_top(r); ret = 1; err: BN_CTX_end(ctx); return ret; } /* * Find r such that r^2 + r = a mod p. r could be a. If no r exists returns * 0. This function calls down to the BN_GF2m_mod_solve_quad_arr * implementation; this wrapper function is only provided for convenience; * for best performance, use the BN_GF2m_mod_solve_quad_arr function. */ int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { int ret = 0; const int max = BN_num_bits(p) + 1; int *arr = NULL; bn_check_top(a); bn_check_top(p); if ((arr = OPENSSL_malloc(sizeof(*arr) * max)) == NULL) goto err; ret = BN_GF2m_poly2arr(p, arr, max); if (!ret || ret > max) { BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD, BN_R_INVALID_LENGTH); goto err; } ret = BN_GF2m_mod_solve_quad_arr(r, a, arr, ctx); bn_check_top(r); err: OPENSSL_free(arr); return ret; } /* * Convert the bit-string representation of a polynomial ( \sum_{i=0}^n a_i * * x^i) into an array of integers corresponding to the bits with non-zero * coefficient. Array is terminated with -1. Up to max elements of the array * will be filled. Return value is total number of array elements that would * be filled if array was large enough. */ int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max) { int i, j, k = 0; BN_ULONG mask; if (BN_is_zero(a)) return 0; for (i = a->top - 1; i >= 0; i--) { if (!a->d[i]) /* skip word if a->d[i] == 0 */ continue; mask = BN_TBIT; for (j = BN_BITS2 - 1; j >= 0; j--) { if (a->d[i] & mask) { if (k < max) p[k] = BN_BITS2 * i + j; k++; } mask >>= 1; } } if (k < max) { p[k] = -1; k++; } return k; } /* * Convert the coefficient array representation of a polynomial to a * bit-string. The array must be terminated by -1. */ int BN_GF2m_arr2poly(const int p[], BIGNUM *a) { int i; bn_check_top(a); BN_zero(a); for (i = 0; p[i] != -1; i++) { if (BN_set_bit(a, p[i]) == 0) return 0; } bn_check_top(a); return 1; } #endif
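/*
 * Illustrative usage sketch, not part of either original source file: it
 * builds the NIST B-163 reduction polynomial t^163 + t^7 + t^6 + t^3 + 1
 * from its coefficient array, inverts a random non-zero field element and
 * checks that a * a^-1 == 1 in GF(2^163).  The function name and the idea
 * of a self-test are hypothetical; error handling is abbreviated and the
 * snippet is not wired into any build.
 */
static int gf2m_inv_selftest(void)
{
    /* exponents of the non-zero coefficients, terminated by -1 */
    static const int k163[] = { 163, 7, 6, 3, 0, -1 };
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *p = BN_new(), *a = BN_new(), *ainv = BN_new(), *t = BN_new();
    int ok = 0;

    if (ctx == NULL || p == NULL || a == NULL || ainv == NULL || t == NULL)
        goto done;
    if (!BN_GF2m_arr2poly(k163, p))     /* p = t^163 + t^7 + t^6 + t^3 + 1 */
        goto done;
    if (!BN_rand(a, 162, 0, 0))         /* random element of degree < 163 */
        goto done;
    if (BN_is_zero(a))                  /* paranoia: zero has no inverse */
        goto done;
    if (!BN_GF2m_mod_inv(ainv, a, p, ctx))
        goto done;
    if (!BN_GF2m_mod_mul(t, a, ainv, p, ctx))
        goto done;
    ok = BN_is_one(t);                  /* expect 1 for every non-zero a */
done:
    BN_free(p);
    BN_free(a);
    BN_free(ainv);
    BN_free(t);
    BN_CTX_free(ctx);
    return ok;
}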
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The User Datagram Protocol (UDP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Hirokazu Takahashi, <taka@valinux.co.jp> * * Fixes: * Alan Cox : verify_area() calls * Alan Cox : stopped close while in use off icmp * messages. Not a fix but a botch that * for udp at least is 'valid'. * Alan Cox : Fixed icmp handling properly * Alan Cox : Correct error for oversized datagrams * Alan Cox : Tidied select() semantics. * Alan Cox : udp_err() fixed properly, also now * select and read wake correctly on errors * Alan Cox : udp_send verify_area moved to avoid mem leak * Alan Cox : UDP can count its memory * Alan Cox : send to an unknown connection causes * an ECONNREFUSED off the icmp, but * does NOT close. * Alan Cox : Switched to new sk_buff handlers. No more backlog! * Alan Cox : Using generic datagram code. Even smaller and the PEEK * bug no longer crashes it. * Fred Van Kempen : Net2e support for sk->broadcast. * Alan Cox : Uses skb_free_datagram * Alan Cox : Added get/set sockopt support. * Alan Cox : Broadcasting without option set returns EACCES. * Alan Cox : No wakeup calls. Instead we now use the callbacks. * Alan Cox : Use ip_tos and ip_ttl * Alan Cox : SNMP Mibs * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. * Matt Dillon : UDP length checks. * Alan Cox : Smarter af_inet used properly. * Alan Cox : Use new kernel side addressing. * Alan Cox : Incorrect return on truncated datagram receive. * Arnt Gulbrandsen : New udp_send and stuff * Alan Cox : Cache last socket * Alan Cox : Route cache * Jon Peatfield : Minor efficiency fix to sendto(). * Mike Shaver : RFC1122 checks. * Alan Cox : Nonblocking error fix. * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * David S. Miller : New socket lookup architecture. * Last socket cache retained as it * does have a high hit rate. * Olaf Kirch : Don't linearise iovec on sendmsg. * Andi Kleen : Some cleanups, cache destination entry * for connect. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Melvin Smith : Check msg_name not msg_namelen in sendto(), * return ENOTCONN for unconnected sockets (POSIX) * Janos Farkas : don't deliver multi/broadcasts to a different * bound-to-device socket * Hirokazu Takahashi : HW checksumming for outgoing UDP * datagrams. * Hirokazu Takahashi : sendfile() on UDP works now. * Arnaldo C. Melo : convert /proc/net/udp to seq_file * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support * James Chapman : Add L2TP encapsulation type. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "UDP: " fmt #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/igmp.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/route.h> #include <net/checksum.h> #include <net/xfrm.h> #include <trace/events/udp.h> #include <linux/static_key.h> #include <trace/events/skb.h> #include <net/ll_poll.h> #include "udp_impl.h" struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); int sysctl_udp_rmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_rmem_min); int sysctl_udp_wmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int log) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); sk_nulls_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { if (bitmap) __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); else return 1; } return 0; } /* * Note: we still hold spinlock of primary hash chain, so no other writer * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); udp_portaddr_for_each_entry(sk2, node, &hslot2->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { res = 1; break; } spin_unlock(&hslot2->lock); return res; } /** * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 * * @sk: socket struct in question * @snum: port number to look up * @saddr_comp: AF-dependent comparison of bound local IP addresses * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); if (!snum) { int low, high, 
remaining; unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; rand = net_random(); first = (((u64)rand * remaining) >> 32) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); last = first + udptable->mask + 1; do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, saddr_comp, udptable->log); snum = first; /* * Iterate on all possible values of snum for this hash. * Using steps of an odd multiple of UDP_HTABLE_SIZE * give us randomization and full range coverage. */ do { if (low <= snum && snum <= high && !test_bit(snum >> udptable->log, bitmap) && !inet_is_reserved_local_port(snum)) goto found; snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); spin_lock_bh(&hslot->lock); if (hslot->count > 10) { int exist; unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; slot2 &= udptable->mask; hash2_nulladdr &= udptable->mask; hslot2 = udp_hashslot2(udptable, slot2); if (hslot->count < hslot2->count) goto scan_primary_hash; exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); } if (exist) goto fail_unlock; else goto found; } scan_primary_hash: if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp, 0)) goto fail_unlock; } found: inet_sk(sk)->inet_num = snum; udp_sk(sk)->udp_port_hash = snum; udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { sk_nulls_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); fail: return error; } EXPORT_SYMBOL(udp_lib_get_port); static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) { struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); return (!ipv6_only_sock(sk2) && (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); } static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); } static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, unsigned short hnum, __be16 sport, __be32 daddr, __be16 dport, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); score = (sk->sk_family == PF_INET ? 
2 : 1); if (inet->inet_rcv_saddr) { if (inet->inet_rcv_saddr != daddr) return -1; score += 4; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } /* * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num) */ static inline int compute_score2(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); if (inet->inet_rcv_saddr != daddr) return -1; if (inet->inet_num != hnum) return -1; score = (sk->sk_family == PF_INET ? 2 : 1); if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } /* called with read_rcu_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif, struct udp_hslot *hslot2, unsigned int slot2) { struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; begin: result = NULL; badness = 0; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = inet_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot2) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, daddr, hnum, dif) < badness)) { sock_put(result); goto begin; } } return result; } /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. 
-DaveM */ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, htonl(INADDR_ANY), hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = 0; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = inet_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, saddr, hnum, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp4_lib_lookup); static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; const struct iphdr *iph = ip_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; else return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable); } struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup); static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct hlist_nulls_node *node; struct sock *s = sk; unsigned short hnum = ntohs(loc_port); sk_nulls_for_each_from(s, node) { struct inet_sock *inet = inet_sk(s); if (!net_eq(sock_net(s), net) || udp_sk(s)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(s) || (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) continue; if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) continue; goto found; } s = NULL; found: return s; } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. 
If err > 0 * it's just the icmp type << 8 | icmp code. * Header points to the ip header of the error packet. We move * on past this. Then (as it used to claim before adjustment) * header points to the first 8 bytes of the udp header. We need * to find the appropriate port. */ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; const struct iphdr *iph = (const struct iphdr *)skb->data; struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable); if (sk == NULL) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: ipv4_sk_redirect(skb, sk); break; } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } void udp_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udp_table); } /* * Throw away all pending data and cancel the corking. Socket is locked. */ void udp_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending) { up->len = 0; up->pending = 0; ip_flush_pending_frames(sk); } } EXPORT_SYMBOL(udp_flush_pending_frames); /** * udp4_hwcsum - handle outgoing HW checksumming * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) * @src: source IP address * @dst: destination IP address */ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) { struct udphdr *uh = udp_hdr(skb); struct sk_buff *frags = skb_shinfo(skb)->frag_list; int offset = skb_transport_offset(skb); int len = skb->len - offset; int hlen = len; __wsum csum = 0; if (!frags) { /* * Only one fragment on the socket. 
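 * Hand the rest to the NIC: point csum_start/csum_offset at the UDP
 * check field and preload it with the complemented pseudo-header sum.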
*/ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ do { csum = csum_add(csum, frags->csum); hlen -= frags->len; } while ((frags = frags->next)); csum = skb_checksum(skb, offset, hlen, csum); skb->ip_summed = CHECKSUM_NONE; uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct udphdr *uh; int err = 0; int is_udplite = IS_UDPLITE(sk); int offset = skb_transport_offset(skb); int len = skb->len - offset; __wsum csum = 0; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = inet->inet_sport; uh->dest = fl4->fl4_dport; uh->len = htons(len); uh->check = 0; if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp4_hwcsum(skb, fl4->saddr, fl4->daddr); goto send; } else csum = udp_csum(skb); /* add protocol-dependent pseudo-header */ uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, sk->sk_protocol, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } /* * Push out all pending data as one UDP datagram. Socket is locked. */ static int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi4 *fl4 = &inet->cork.fl.u.ip4; struct sk_buff *skb; int err = 0; skb = ip_finish_skb(sk, fl4); if (!skb) goto out; err = udp_send_skb(skb, fl4); out: up->len = 0; up->pending = 0; return err; } int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); struct flowi4 fl4_stack; struct flowi4 *fl4; int ulen = len; struct ipcm_cookie ipc; struct rtable *rt = NULL; int free = 0; int connected = 0; __be32 daddr, faddr, saddr; __be16 dport; u8 tos; int err, is_udplite = IS_UDPLITE(sk); int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sk_buff *skb; struct ip_options_data opt_copy; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; ipc.opt = NULL; ipc.tx_flags = 0; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET)) { release_sock(sk); return -EINVAL; } goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); /* * Get and verify the address. 
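 * The destination is taken either from an explicit sockaddr_in in
 * msg_name or, for a connected socket, from inet_daddr/inet_dport.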
*/ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { if (usin->sin_family != AF_UNSPEC) return -EAFNOSUPPORT; } daddr = usin->sin_addr.s_addr; dport = usin->sin_port; if (dport == 0) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; dport = inet->inet_dport; /* Open fast path for connected socket. Route will not be used, if at least one option is set. */ connected = 1; } ipc.addr = inet->inet_saddr; ipc.oif = sk->sk_bound_dev_if; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; connected = 0; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; connected = 0; } tos = RT_TOS(inet->tos); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; connected = 0; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; connected = 0; } else if (!ipc.oif) ipc.oif = inet->uc_index; if (connected) rt = (struct rtable *)sk_dst_check(sk, 0); if (rt == NULL) { struct net *net = sock_net(sk); fl4 = &fl4_stack; flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP, faddr, saddr, dport, inet->inet_sport); security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (connected) sk_dst_set(sk, dst_clone(&rt->dst)); } if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: saddr = fl4->saddr; if (!ipc.addr) daddr = ipc.addr = fl4->daddr; /* Lockless fast path for the non-corking case. */ if (!corkreq) { skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_send_skb(skb, fl4); goto out; } lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); err = -EINVAL; goto out; } /* * Now cork the socket to pend data. */ fl4 = &inet->cork.fl.u.ip4; fl4->daddr = daddr; fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = inet->inet_sport; up->pending = AF_INET; do_append_data: up->len += ulen; err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_flush_pending_frames(sk); else if (!corkreq) err = udp_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. 
Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } EXPORT_SYMBOL(udp_sendmsg); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); int ret; if (!up->pending) { struct msghdr msg = { .msg_flags = flags|MSG_MORE }; /* Call udp_sendmsg to specify destination address which * sendpage interface can't pass. * This will succeed only when the socket is connected. */ ret = udp_sendmsg(NULL, sk, &msg, 0); if (ret < 0) return ret; } lock_sock(sk); if (unlikely(!up->pending)) { release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); return -EINVAL; } ret = ip_append_page(sk, &inet->cork.fl.u.ip4, page, offset, size, flags); if (ret == -EOPNOTSUPP) { release_sock(sk); return sock_no_sendpage(sk->sk_socket, page, offset, size, flags); } if (ret < 0) { udp_flush_pending_frames(sk); goto out; } up->len += size; if (!(up->corkflag || (flags&MSG_MORE))) ret = udp_push_pending_frames(sk); if (!ret) ret = size; out: release_sock(sk); return ret; } /** * first_packet_length - return length of first packet in receive queue * @sk: socket * * Drops all bad checksum frames, until a valid one is found. * Returns the length of found skb, or 0 if none is found. */ static unsigned int first_packet_length(struct sock *sk) { struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; unsigned int res; __skb_queue_head_init(&list_kill); spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); __skb_queue_tail(&list_kill, skb); } res = skb ? skb->len : 0; spin_unlock_bh(&rcvq->lock); if (!skb_queue_empty(&list_kill)) { bool slow = lock_sock_fast(sk); __skb_queue_purge(&list_kill); sk_mem_reclaim_partial(sk); unlock_sock_fast(sk, slow); } return res; } /* * IOCTL requests applicable to the UDP protocol */ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { unsigned int amount = first_packet_length(sk); if (amount) /* * We will only return the amount * of this packet since that is all * that will be read. */ amount -= sizeof(struct udphdr); return put_user(amount, (int __user *)arg); } default: return -ENOIOCTLCMD; } return 0; } EXPORT_SYMBOL(udp_ioctl); /* * This should be easy, if there is something there we * return it, otherwise we block. 
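 * (If the read is truncated or only partially covered (UDP-Lite), the
 * checksum is verified before copying; otherwise it is folded into the
 * copy via the copy-and-checksum helper when still needed.)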
*/ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; /* * Check any passed addresses */ if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; } int udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); /* * 1003.1g - break association. 
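 * Clear the peer address/port and the cached route; the bound local
 * address and port are reset as well unless they were locked by bind().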
*/ sk->sk_state = TCP_CLOSE; inet->inet_daddr = 0; inet->inet_dport = 0; sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { sk->sk_prot->unhash(sk); inet->inet_sport = 0; } sk_dst_reset(sk); return 0; } EXPORT_SYMBOL(udp_disconnect); void udp_lib_unhash(struct sock *sk) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2; hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock_bh(&hslot->lock); if (sk_nulls_del_node_init_rcu(sk)) { hslot->count--; inet_sk(sk)->inet_num = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); } spin_unlock_bh(&hslot->lock); } } EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ void udp_lib_rehash(struct sock *sk, u16 newhash) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2, *nhslot2; hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); nhslot2 = udp_hashslot2(udptable, newhash); udp_sk(sk)->udp_portaddr_hash = newhash; if (hslot2 != nhslot2) { hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); /* we must lock primary chain too */ spin_lock_bh(&hslot->lock); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); spin_lock(&nhslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &nhslot2->head); nhslot2->count++; spin_unlock(&nhslot2->lock); spin_unlock_bh(&hslot->lock); } } } EXPORT_SYMBOL(udp_lib_rehash); static void udp_v4_rehash(struct sock *sk) { u16 new_hash = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (inet_sk(sk)->inet_daddr) sock_rps_save_rxhash(sk, skb); rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; } return 0; } static struct static_key udp_encap_needed __read_mostly; void udp_encap_enable(void) { if (!static_key_enabled(&udp_encap_needed)) static_key_slow_inc(&udp_encap_needed); } EXPORT_SYMBOL(udp_encap_enable); /* returns: * -1: error * 0: success * >0: "udp encap" protocol resubmission * * Note that in the success and error cases, the skb is assumed to * have either been requeued or freed. */ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); /* * Charge it to the socket, dropping if the queue is full. */ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. 
* up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { int ret; ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are * disabled for the following two types of errors: these depend * on the application settings, not on the functioning of the * protocol stack as such. * * RFC 3828 here recommends (sec 3.3): "There should also be a * way ... to ... at least let the receiving application block * delivery of packets with coverage values less than a value * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested * by the receiver. This is subtle: if receiver wants x and x is * greater than the buffersize/MTU then receiver will complain * that it wants x while sender emits packets of smaller size y. * Therefore the above ...()->partial_cov statement is essential. */ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter) && udp_lib_checksum_complete(skb)) goto csum_error; if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; rc = 0; ipv4_pktinfo_prepare(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); return rc; csum_error: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { unsigned int i; struct sk_buff *skb1 = NULL; struct sock *sk; for (i = 0; i < count; i++) { sk = stack[i]; if (likely(skb1 == NULL)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) skb1 = NULL; } if (unlikely(skb1)) kfree_skb(skb1); } /* * Multicasts and broadcasts go to each listener. * * Note: called only from the BH handler context. 
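 * Matching sockets are batched into a small on-stack array under the
 * hash-slot lock; flush_stack() then hands each one a clone of the skb,
 * with references held so the final batch can run after the lock drops.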
*/ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = skb->dev->ifindex; sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing chain lock, we must take a reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); /* * do the slow work with no lock held */ if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; } /* Initialize UDP checksum. If exited with zero value (success), * CHECKSUM_UNNECESSARY means, that no more checks are required. * Otherwise, csum completion requires chacksumming packet body, * including udp header and folding it to skb->csum. */ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) { const struct iphdr *iph; int err; UDP_SKB_CB(skb)->partial_cov = 0; UDP_SKB_CB(skb)->cscov = skb->len; if (proto == IPPROTO_UDPLITE) { err = udplite_checksum_init(skb, uh); if (err) return err; } iph = ip_hdr(skb); if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). */ return 0; } /* * All we need to do is get the socket, and then do a checksum. */ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct sock *sk; struct udphdr *uh; unsigned short ulen; struct rtable *rt = skb_rtable(skb); __be32 saddr, daddr; struct net *net = dev_net(skb->dev); /* * Validate the packet. */ if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto drop; /* No space for header. */ uh = udp_hdr(skb); ulen = ntohs(uh->len); saddr = ip_hdr(skb)->saddr; daddr = ip_hdr(skb)->daddr; if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. */ if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) goto short_packet; uh = udp_hdr(skb); } if (udp4_csum_init(skb, uh, proto)) goto csum_error; if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk != NULL) { int ret; sk_mark_ll(sk, skb); ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); /* No socket. 
Drop packet silently, if checksum is wrong */ if (udp_lib_checksum_complete(skb)) goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* * Hmm. We got an UDP packet to a port to which we * don't wanna listen. Ignore it. */ kfree_skb(skb); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), ulen, skb->len, &daddr, ntohs(uh->dest)); goto drop; csum_error: /* * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), ulen); UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } int udp_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); } void udp_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); unlock_sock_fast(sk, slow); if (static_key_false(&udp_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } } /* * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); int val; int err = 0; int is_udplite = IS_UDPLITE(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case UDP_CORK: if (val != 0) { up->corkflag = 1; } else { up->corkflag = 0; lock_sock(sk); (*push_pending_frames)(sk); release_sock(sk); } break; case UDP_ENCAP: switch (val) { case 0: case UDP_ENCAP_ESPINUDP: case UDP_ENCAP_ESPINUDP_NON_IKE: up->encap_rcv = xfrm4_udp_encap_rcv; /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; udp_encap_enable(); break; default: err = -ENOPROTOOPT; break; } break; /* * UDP-Lite's partial checksum coverage (RFC 3828). */ /* The sender sets actual checksum coverage length via this option. * The case coverage > packet length is handled by send module. */ case UDPLITE_SEND_CSCOV: if (!is_udplite) /* Disable the option on UDP sockets */ return -ENOPROTOOPT; if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; up->pcslen = val; up->pcflag |= UDPLITE_SEND_CC; break; /* The receiver specifies a minimum checksum coverage value. To make * sense, this should be set to at least 8 (as done below). If zero is * used, this again means full checksum coverage. */ case UDPLITE_RECV_CSCOV: if (!is_udplite) /* Disable the option on UDP sockets */ return -ENOPROTOOPT; if (val != 0 && val < 8) /* Avoid silly minimal values. 
*/ val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; up->pcrlen = val; up->pcflag |= UDPLITE_RECV_CC; break; default: err = -ENOPROTOOPT; break; } return err; } EXPORT_SYMBOL(udp_lib_setsockopt); int udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_push_pending_frames); return compat_ip_setsockopt(sk, level, optname, optval, optlen); } #endif int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct udp_sock *up = udp_sk(sk); int val, len; if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; switch (optname) { case UDP_CORK: val = up->corkflag; break; case UDP_ENCAP: val = up->encap_type; break; /* The following two cannot be changed on UDP sockets, the return is * always 0 (which corresponds to the full checksum coverage of UDP). */ case UDPLITE_SEND_CSCOV: val = up->pcslen; break; case UDPLITE_RECV_CSCOV: val = up->pcrlen; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } EXPORT_SYMBOL(udp_lib_getsockopt); int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return ip_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ip_getsockopt(sk, level, optname, optval, optlen); } #endif /** * udp_poll - wait for a UDP event. * @file - file struct * @sock - socket * @wait - poll table * * This is same as datagram poll, except for the special case of * blocking sockets. If application is using a blocking fd * and a packet with checksum error is in the queue; * then it could get return from select indicating data available * but then block when reading it. Add special case code * to work around these arguably broken applications. 
*/ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; sock_rps_record_flow(sk); /* Check for false positives due to checksum errors */ if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) mask &= ~(POLLIN | POLLRDNORM); return mask; } EXPORT_SYMBOL(udp_poll); struct proto udp_prot = { .name = "UDP", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, .backlog_rcv = __udp_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, }; EXPORT_SYMBOL(udp_prot); /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); for (state->bucket = start; state->bucket <= state->udp_table->mask; ++state->bucket) { struct hlist_nulls_node *node; struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; if (hlist_nulls_empty(&hslot->head)) continue; spin_lock_bh(&hslot->lock); sk_nulls_for_each(sk, node, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == state->family) goto found; } spin_unlock_bh(&hslot->lock); } sk = NULL; found: return sk; } static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); do { sk = sk_nulls_next(sk); } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); if (!sk) { if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); return udp_get_first(seq, state->bucket + 1); } return sk; } static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = udp_get_first(seq, 0); if (sk) while (pos && (sk = udp_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } static void *udp_seq_start(struct seq_file *seq, loff_t *pos) { struct udp_iter_state *state = seq->private; state->bucket = MAX_UDP_PORTS; return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = udp_get_idx(seq, 0); else sk = udp_get_next(seq, v); ++*pos; return sk; } static void udp_seq_stop(struct seq_file *seq, void *v) { struct udp_iter_state *state = seq->private; if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); } int udp_seq_open(struct inode *inode, struct file *file) { struct udp_seq_afinfo *afinfo = PDE_DATA(inode); struct udp_iter_state *s; int err; err = seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct udp_iter_state)); if (err < 0) return err; s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; s->udp_table = afinfo->udp_table; return err; } EXPORT_SYMBOL(udp_seq_open); /* ------------------------------------------------------------------------ */ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) { struct proc_dir_entry *p; int rc = 0; afinfo->seq_ops.start = udp_seq_start; afinfo->seq_ops.next = udp_seq_next; afinfo->seq_ops.stop = udp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; } EXPORT_SYMBOL(udp_proc_register); void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL(udp_proc_unregister); /* ------------------------------------------------------------------------ */ static void udp4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops), len); } int udp4_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { struct udp_iter_state *state = seq->private; int len; udp4_format_sock(v, seq, state->bucket, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } return 0; } static const struct file_operations udp_afinfo_seq_fops = { .owner = THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; /* ------------------------------------------------------------------------ */ static struct udp_seq_afinfo udp4_seq_afinfo = { .name = "udp", .family = AF_INET, .udp_table = &udp_table, .seq_fops = &udp_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, }; static int __net_init udp4_proc_init_net(struct net *net) { return udp_proc_register(net, &udp4_seq_afinfo); } static void __net_exit udp4_proc_exit_net(struct net *net) { udp_proc_unregister(net, &udp4_seq_afinfo); } static struct pernet_operations udp4_net_ops = { .init = udp4_proc_init_net, .exit = udp4_proc_exit_net, }; int __init udp4_proc_init(void) { return register_pernet_subsys(&udp4_net_ops); } void udp4_proc_exit(void) { unregister_pernet_subsys(&udp4_net_ops); } #endif /* CONFIG_PROC_FS */ static __initdata unsigned long uhash_entries; static 
int __init set_uhash_entries(char *str) { ssize_t ret; if (!str) return 0; ret = kstrtoul(str, 0, &uhash_entries); if (ret) return 0; if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) uhash_entries = UDP_HTABLE_SIZE_MIN; return 1; } __setup("uhash_entries=", set_uhash_entries); void __init udp_table_init(struct udp_table *table, const char *name) { unsigned int i; table->hash = alloc_large_system_hash(name, 2 * sizeof(struct udp_hslot), uhash_entries, 21, /* one slot per 2 MB */ 0, &table->log, &table->mask, UDP_HTABLE_SIZE_MIN, 64 * 1024); table->hash2 = table->hash + (table->mask + 1); for (i = 0; i <= table->mask; i++) { INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i); table->hash2[i].count = 0; spin_lock_init(&table->hash2[i].lock); } } void __init udp_init(void) { unsigned long limit; udp_table_init(&udp_table, "UDP"); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_udp_mem[0] = limit / 4 * 3; sysctl_udp_mem[1] = limit; sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; sysctl_udp_rmem_min = SK_MEM_QUANTUM; sysctl_udp_wmem_min = SK_MEM_QUANTUM; } struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); int mac_len = skb->mac_len; int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); __be16 protocol = skb->protocol; netdev_features_t enc_features; int outer_hlen; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = htons(ETH_P_TEB); /* segment inner packet. */ enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); segs = skb_mac_gso_segment(skb, enc_features); if (!segs || IS_ERR(segs)) goto out; outer_hlen = skb_tnl_header_len(skb); skb = segs; do { struct udphdr *uh; int udp_offset = outer_hlen - tnl_hlen; skb->mac_len = mac_len; skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, udp_offset); uh = udp_hdr(skb); uh->len = htons(skb->len - udp_offset); /* csum segment if tunnel sets skb with csum. */ if (unlikely(uh->check)) { struct iphdr *iph = ip_hdr(skb); uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - udp_offset, IPPROTO_UDP, 0); uh->check = csum_fold(skb_checksum(skb, udp_offset, skb->len - udp_offset, 0)); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } skb->ip_summed = CHECKSUM_NONE; skb->protocol = protocol; } while ((skb = skb->next)); out: return segs; }
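/*
 * Illustrative user-space sketch (an assumption for clarity, not part of
 * the original file): the UDP-Lite checksum-coverage options handled by
 * udp_lib_setsockopt() and udp_lib_getsockopt() above are driven from an
 * application with ordinary setsockopt() calls at the IPPROTO_UDPLITE
 * level.  The 20-byte coverage below is an arbitrary example value;
 * values 1..7 are raised to 8 by the kernel code above, and 0 requests
 * full coverage.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udplite.h>

static int open_udplite_socket_with_coverage(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cscov = 20;	/* 8-byte UDP-Lite header plus first 12 payload bytes */

	if (fd < 0)
		return -1;
	/* checksum coverage applied to datagrams this socket sends */
	(void) setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov,
			  sizeof(cscov));
	/* minimum coverage accepted on datagrams this socket receives */
	(void) setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cscov,
			  sizeof(cscov));
	return fd;
}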
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % OOO PPPP EEEE RRRR AA TTTTT III OOO N N % % O O P P E R R A A T I O O NN N % % O O PPPP EEE RRRR AAAA T I O O N N N % % O O P E R R A A T I O O N NN % % OOO P EEEE R RR A A T III OOO N N % % % % % % CLI Magick Option Methods % % % % Dragon Computing % % Anthony Thyssen % % September 2011 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Apply the given options (settings, and simple, or sequence operations) to % the given image(s) according to the current "image_info", "draw_info", and % "quantize_info" settings, stored in a special CLI Image Wand. % % The final goal is to allow the execution in a strict one option at a time % manner that is needed for 'pipelining and file scripting' of options in % IMv7. % % Anthony Thyssen, September 2011 */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/mogrify.h" #include "MagickWand/operation.h" #include "MagickWand/wand.h" #include "MagickWand/wandcli.h" #include "MagickWand/wandcli-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/image-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-private.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer-private.h" /* Constant declaration. */ static const char MogrifyAlphaColor[] = "#bdbdbd", /* slightly darker gray */ MogrifyBackgroundColor[] = "#fff", /* white */ MogrifyBorderColor[] = "#dfdfdf"; /* sRGB gray */ /* Define declarations. */ #define USE_WAND_METHODS 1 #define MAX_STACK_DEPTH 32 #define UNDEFINED_COMPRESSION_QUALITY 0UL /* FUTURE: why is this default so specific? 
*/ #define DEFAULT_DISSIMILARITY_THRESHOLD "0.31830988618379067154" /* For Debugging Geometry Input */ #define ReportGeometry(flags,info) \ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", \ flags, info.rho, info.sigma, info.xi, info.psi ) /* ** Function to report on the progress of image operations */ static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MagickPathExtent], tag[MagickPathExtent]; const char *locale_message; register char *p; magick_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MagickPathExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } /* ** GetImageCache() will read an image into a image cache if not already ** present then return the image that is in the cache under that filename. */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MagickPathExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); if (path != (const char *) NULL) (void) CopyMagickString(read_info->filename,path,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } /* SparseColorOption() parse the complex -sparse-color argument into an an array of floating point values than call SparseColorImage(). Argument is a complex mix of floating-point pixel coodinates, and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. This really should be in MagickCore, so that other API's can make use of it. 
*/ static Image *SparseColorOption(const Image *image, const SparseColorMethod method,const char *arguments,ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p; double *sparse_arguments; Image *sparse_image; PixelInfo color; MagickBooleanType error; register size_t x; size_t number_arguments, number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to image add up number of values needed per color. */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) x += number_colors; /* color argument found */ else x++; /* floating point argument */ } /* control points and color values */ if ((x % (2+number_colors)) != 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } error=MagickFalse; number_arguments=x; /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of X-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of Y-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryColorCompliance(token,AllCompliance,&color, exception); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.red; if 
((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.blue; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) sparse_arguments[x++] = QuantumScale*color.black; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) sparse_arguments[x++] = QuantumScale*color.alpha; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } if (error != MagickFalse) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return((Image *) NULL); } if (number_arguments != x) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error"); return((Image *) NULL); } /* Call the Sparse Color Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments, exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L I S e t t i n g O p t i o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLISettingOptionInfo() applies a single settings option into a CLI wand % holding the image_info, draw_info, quantize_info structures that will be % used when processing the images. 
% % These options do no require images to be present in the CLI wand for them % to be able to be set, in which case they will generally be applied to image % that are read in later % % Options handled by this function are listed in CommandOptions[] of % "option.c" that is one of "SettingOptionFlags" option flags. % % The format of the CLISettingOptionInfo method is: % % void CLISettingOptionInfo(MagickCLI *cli_wand, % const char *option, const char *arg1, const char *arg2) % % A description of each parameter follows: % % o cli_wand: structure holding settings to be applied % % o option: The option string to be set % % o arg1, arg2: optional argument strings to the operation % arg2 is currently only used by "-limit" % */ WandPrivate void CLISettingOptionInfo(MagickCLI *cli_wand, const char *option,const char *arg1n, const char *arg2n) { ssize_t parse; /* option argument parsing (string to value table lookup) */ const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _image (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define IfSetOption (*option=='-') #define ArgBoolean IfSetOption ? MagickTrue : MagickFalse #define ArgBooleanNot IfSetOption ? MagickFalse : MagickTrue #define ArgBooleanString (IfSetOption?"true":"false") #define ArgOption(def) (IfSetOption?arg1:(const char *)(def)) assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Setting Option: %s \"%s\" \"%s\"", option,arg1n,arg2n); arg1 = arg1n, arg2 = arg2n; #if 1 #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type #endif switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { _image_info->adjoin = ArgBoolean; break; } if (LocaleCompare("affine",option+1) == 0) { CLIWandWarnReplaced("-draw 'affine ...'"); if (IfSetOption) (void) ParseAffineGeometry(arg1,&_draw_info->affine,_exception); else GetAffineMatrix(&_draw_info->affine); break; } if (LocaleCompare("antialias",option+1) == 0) { _image_info->antialias = _draw_info->stroke_antialias = _draw_info->text_antialias = ArgBoolean; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption("1.0")); break; } if (LocaleCompare("authenticate",option+1) == 0) { 
(void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'b': { if (LocaleCompare("background",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! _image_info only used directly for generating new images. SyncImageSettings() used to set per-image attribute. FUTURE: if _image_info->background_color is not set then we should fall back to per-image background_color At this time -background will 'wipe out' the per-image background color! Better error handling of QueryColorCompliance() needed. */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption(MogrifyBackgroundColor),AllCompliance, &_image_info->background_color,_exception); break; } if (LocaleCompare("bias",option+1) == 0) { /* FUTURE: bias OBSOLETED, replaced by Artifact "convolve:bias" as it is actually rarely used except in direct convolve operations Usage outside a direct convolve operation is actally non-sensible! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,"convolve:bias",ArgOption(NULL)); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { /* Used as a image chromaticity setting SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } if (LocaleCompare("blue-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined Used by many coders including PNG SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("bordercolor",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! SyncImageSettings() used to set per-image attribute. Better error checking of QueryColorCompliance(). 
*/ if (IfSetOption) { (void) SetImageOption(_image_info,option+1,arg1); (void) QueryColorCompliance(arg1,AllCompliance, &_image_info->border_color,_exception); (void) QueryColorCompliance(arg1,AllCompliance, &_draw_info->border_color,_exception); break; } (void) DeleteImageOption(_image_info,option+1); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &_image_info->border_color,_exception); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &_draw_info->border_color,_exception); break; } if (LocaleCompare("box",option+1) == 0) { CLIWandWarnReplaced("-undercolor"); CLISettingOptionInfo(cli_wand,"-undercolor",arg1, arg2); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType limit; if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",arg1) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(arg1,100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("colorspace",option+1) == 0) { /* Setting used for new images via AquireImage() But also used as a SimpleImageOperator Undefined colorspace means don't modify images on read or as a operation */ parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option, arg1); _image_info->colorspace=(ColorspaceType) parse; break; } if (LocaleCompare("comment",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("compose",option+1) == 0) { /* FUTURE: _image_info should be used, SyncImageSettings() used to set per-image attribute. - REMOVE This setting should NOT be used to set image 'compose' "-layer" operators shoud use _image_info if defined otherwise they should use a per-image compose setting. */ parse = ParseCommandOption(MagickComposeOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedComposeOperator", option,arg1); _image_info->compose=(CompositeOperator) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("compress",option+1) == 0) { /* FUTURE: What should be used? _image_info or ImageOption ??? The former is more efficent, but Crisy prefers the latter! SyncImageSettings() used to set per-image attribute. The coders appears to use _image_info, not Image_Option however the image attribute (for save) is set from the ImageOption! Note that "undefined" is a different setting to "none". */ parse = ParseCommandOption(MagickCompressOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageCompression", option,arg1); _image_info->compression=(CompressionType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("debug",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. 
*/ arg1=ArgOption("none"); parse = ParseCommandOption(MagickLogEventOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEventType", option,arg1); (void) SetLogEventMask(arg1); _image_info->debug=IsEventLogging(); /* extract logging*/ cli_wand->wand.debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (LocaleNCompare(arg1,"registry:",9) == 0) { if (IfSetOption) (void) DefineImageRegistry(StringRegistryType,arg1+9,_exception); else (void) DeleteImageRegistry(arg1+9); break; } /* DefineImageOption() equals SetImageOption() but with '=' */ if (IfSetOption) (void) DefineImageOption(_image_info,arg1); else if (DeleteImageOption(_image_info,arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"NoSuchOption",option,arg1); break; } if (LocaleCompare("delay",option+1) == 0) { /* Only used for new images via AcquireImage() FUTURE: Option should also be used for "-morph" (color morphing) */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("density",option+1) == 0) { /* FUTURE: strings used in _image_info attr and _draw_info! Basically as density can be in a XxY form! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) CloneString(&_image_info->density,ArgOption(NULL)); (void) CloneString(&_draw_info->density,_image_info->density); break; } if (LocaleCompare("depth",option+1) == 0) { /* This is also a SimpleImageOperator! for 8->16 vaule trunc !!!! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->depth=IfSetOption?StringToUnsignedLong(arg1) :MAGICKCORE_QUANTUM_DEPTH; break; } if (LocaleCompare("direction",option+1) == 0) { /* Image Option is only used to set _draw_info */ arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickDirectionOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedDirectionType", option,arg1); _draw_info->direction=(DirectionType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&_image_info->server_name,ArgOption(NULL)); (void) CloneString(&_draw_info->server_name,_image_info->server_name); break; } if (LocaleCompare("dispose",option+1) == 0) { /* only used in setting new images */ arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickDisposeOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedDisposeMethod", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption("undefined")); break; } if (LocaleCompare("dissimilarity-threshold",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ arg1=ArgOption(DEFAULT_DISSIMILARITY_THRESHOLD); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("dither",option+1) == 0) { /* _image_info attr (on/off), _quantize_info attr (on/off) but also ImageInfo and _quantize_info method! 
FUTURE: merge the duality of the dithering options */ _image_info->dither = ArgBoolean; (void) SetImageOption(_image_info,option+1,ArgOption("none")); _quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,ArgOption("none")); if (_quantize_info->dither_method == NoDitherMethod) _image_info->dither = MagickFalse; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&_draw_info->encoding,ArgOption("undefined")); (void) SetImageOption(_image_info,option+1,_draw_info->encoding); break; } if (LocaleCompare("endian",option+1) == 0) { /* Both _image_info attr and ImageInfo */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickEndianOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEndianType", option,arg1); /* FUTURE: check alloc/free of endian string! - remove? */ _image_info->endian=(EndianType) (*arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("extract",option+1) == 0) { (void) CloneString(&_image_info->extract,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("family",option+1) == 0) { (void) CloneString(&_draw_info->family,ArgOption(NULL)); break; } if (LocaleCompare("features",option+1) == 0) { (void) SetImageOption(_image_info,"identify:features", ArgBooleanString); if (IfSetOption) (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { /* Set "fill" OR "fill-pattern" in _draw_info The original fill color is preserved if a fill-pattern is given. That way it does not effect other operations that directly using the fill color and, can be retored using "+tile". */ MagickBooleanType status; ExceptionInfo *sans; PixelInfo color; arg1 = ArgOption("none"); /* +fill turns it off! */ (void) SetImageOption(_image_info,option+1,arg1); if (_draw_info->fill_pattern != (Image *) NULL) _draw_info->fill_pattern=DestroyImage(_draw_info->fill_pattern); /* is it a color or a image? -- ignore exceptions */ sans=AcquireExceptionInfo(); status=QueryColorCompliance(arg1,AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) _draw_info->fill_pattern=GetImageCache(_image_info,arg1,_exception); else _draw_info->fill=color; break; } if (LocaleCompare("filter",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickFilterOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageFilter", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("font",option+1) == 0) { (void) CloneString(&_draw_info->font,ArgOption(NULL)); (void) CloneString(&_image_info->font,_draw_info->font); break; } if (LocaleCompare("format",option+1) == 0) { /* FUTURE: why the ping test, you could set ping after this! */ /* register const char *q; for (q=strchr(arg1,'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) _image_info->ping=MagickFalse; */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("fuzz",option+1) == 0) { /* Option used to set image fuzz! unless blank canvas (from color) Image attribute used for color compare operations SyncImageSettings() used to set per-image attribute. 
FUTURE: Can't find anything else using _image_info->fuzz directly! convert structure attribute to 'option' string */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->fuzz=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("none"); parse = ParseCommandOption(MagickGravityOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedGravityType", option,arg1); _draw_info->gravity=(GravityType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("green-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined SyncImageSettings() used to set per-image attribute. Used directly by many coders */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,"compare:highlight-color", ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityType", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("intent",option+1) == 0) { /* Only used by coders: MIFF, MPC, BMP, PNG and for image profile call to AcquireTransformThreadSet() SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickIntentOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntentType", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interlace",option+1) == 0) { /* _image_info is directly used by coders (so why an image setting?) SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickInterlaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedInterlaceType", option,arg1); _image_info->interlace=(InterlaceType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); _draw_info->interline_spacing=StringToDouble(ArgOption("0"), (char **) NULL); break; } if (LocaleCompare("interpolate",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. 
*/ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickInterpolateOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedInterpolateMethod", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); _draw_info->interword_spacing=StringToDouble(ArgOption("0"),(char **) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _draw_info->kerning=StringToDouble(ArgOption("0"),(char **) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("label",option+1) == 0) { /* only used for new images - not in SyncImageOptions() */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; limit=MagickResourceInfinity; parse= ParseCommandOption(MagickResourceOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedResourceType", option,arg1); if (LocaleCompare("unlimited",arg2) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(arg2,100.0); (void) SetMagickResourceLimit((ResourceType)parse,limit); break; } if (LocaleCompare("log",option+1) == 0) { if (IfSetOption) { if ((strchr(arg1,'%') == (char *) NULL)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetLogFormat(arg1); } break; } if (LocaleCompare("lowlight-color",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,"compare:lowlight-color", ArgOption(NULL)); break; } if (LocaleCompare("loop",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("mattecolor",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption(MogrifyAlphaColor), AllCompliance,&_image_info->matte_color,_exception); break; } if (LocaleCompare("metric",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ parse=ParseCommandOption(MagickMetricOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedMetricType", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("moments",option+1) == 0) { (void) SetImageOption(_image_info,"identify:moments", ArgBooleanString); if (IfSetOption) (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(_image_info, IfSetOption? 
MonitorProgress: (MagickProgressMonitor) NULL, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { /* Setting (used by some input coders!) -- why? Warning: This is also Special '-type' SimpleOperator */ _image_info->monochrome= ArgBoolean; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'o': { if (LocaleCompare("orient",option+1) == 0) { /* Is not used when defining for new images. This makes it more of a 'operation' than a setting FUTURE: make set meta-data operator instead. SyncImageSettings() used to set per-image attribute. */ parse=ParseCommandOption(MagickOrientationOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageOrientation", option,arg1); _image_info->orientation=(OrientationType)parse; (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("page",option+1) == 0) { /* Only used for new images and image generators. SyncImageSettings() used to set per-image attribute. ????? That last is WRONG!!!! FUTURE: adjust named 'page' sizes according density */ char *canonical_page, page[MagickPathExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (!IfSetOption) { (void) DeleteImageOption(_image_info,option+1); (void) CloneString(&_image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(_image_info,"page"); if (image_option != (const char *) NULL) flags=ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(arg1); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(_image_info,option+1,page); (void) CloneString(&_image_info->page,page); break; } if (LocaleCompare("ping",option+1) == 0) { _image_info->ping = ArgBoolean; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (IfSetOption) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->pointsize = _draw_info->pointsize = StringToDouble(arg1,(char **) NULL); } else { _image_info->pointsize=0.0; /* unset pointsize */ _draw_info->pointsize=12.0; } break; } if (LocaleCompare("precision",option+1) == 0) { arg1=ArgOption("-1"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetMagickPrecision(StringToInteger(arg1)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->quality= IfSetOption ? 
StringToUnsignedLong(arg1) : UNDEFINED_COMPRESSION_QUALITY; (void) SetImageOption(_image_info,option+1,ArgOption("0")); break; } if (LocaleCompare("quantize",option+1) == 0) { /* Just a set direct in _quantize_info */ arg1=ArgOption("undefined"); parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace", option,arg1); _quantize_info->colorspace=(ColorspaceType)parse; break; } if (LocaleCompare("quiet",option+1) == 0) { /* FUTURE: if two -quiet is performed you can not do +quiet! This needs to be checked over thoughly. */ static WarningHandler warning_handler = (WarningHandler) NULL; WarningHandler tmp = SetWarningHandler((WarningHandler) NULL); if ( tmp != (WarningHandler) NULL) warning_handler = tmp; /* remember the old handler */ if (!IfSetOption) /* set the old handler */ warning_handler=SetWarningHandler(warning_handler); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined Used by many coders SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("regard-warnings",option+1) == 0) /* FUTURE: to be replaced by a 'fatal-level' type setting */ break; if (LocaleCompare("render",option+1) == 0) { /* _draw_info only setting */ _draw_info->render= ArgBooleanNot; break; } if (LocaleCompare("respect-parenthesis",option+1) == 0) { /* link image and setting stacks - option is itself saved on stack! */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* FUTURE: should be converted to jpeg:sampling_factor */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) CloneString(&_image_info->sampling_factor,ArgOption(NULL)); break; } if (LocaleCompare("scene",option+1) == 0) { /* SyncImageSettings() used to set this as a per-image attribute. What ??? Why ???? */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _image_info->scene=StringToUnsignedLong(ArgOption("0")); break; } if (LocaleCompare("seed",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); SetRandomSecretKey( IfSetOption ? (unsigned long) StringToUnsignedLong(arg1) : (unsigned long) time((time_t *) NULL)); break; } if (LocaleCompare("size",option+1) == 0) { /* FUTURE: string in _image_info -- convert to Option ??? Look at the special handling for "size" in SetImageOption() */ (void) CloneString(&_image_info->size,ArgOption(NULL)); break; } if (LocaleCompare("stretch",option+1) == 0) { arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickStretchOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedStretchType", option,arg1); _draw_info->stretch=(StretchType) parse; break; } if (LocaleCompare("stroke",option+1) == 0) { /* set stroke color OR stroke-pattern UPDATE: ensure stroke color is not destroyed is a pattern is given. 
Just in case the color is also used for other purposes. */ MagickBooleanType status; ExceptionInfo *sans; PixelInfo color; arg1 = ArgOption("none"); /* +fill turns it off! */ (void) SetImageOption(_image_info,option+1,arg1); if (_draw_info->stroke_pattern != (Image *) NULL) _draw_info->stroke_pattern=DestroyImage(_draw_info->stroke_pattern); /* is it a color or a image? -- ignore exceptions */ sans=AcquireExceptionInfo(); status=QueryColorCompliance(arg1,AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) _draw_info->stroke_pattern=GetImageCache(_image_info,arg1,_exception); else _draw_info->stroke=color; break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _draw_info->stroke_width=StringToDouble(ArgOption("1.0"), (char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickStyleOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedStyleType", option,arg1); _draw_info->style=(StyleType) parse; break; } #if 0 if (LocaleCompare("subimage-search",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } #endif if (LocaleCompare("synchronize",option+1) == 0) { /* FUTURE: syncronize to storage - but what does that mean? */ _image_info->synchronize = ArgBoolean; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 't': { if (LocaleCompare("taint",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } if (LocaleCompare("texture",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ /* FUTURE: move _image_info string to option splay-tree Other than "montage" what uses "texture" ???? */ (void) CloneString(&_image_info->texture,ArgOption(NULL)); break; } if (LocaleCompare("tile",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ _draw_info->fill_pattern=IfSetOption ?GetImageCache(_image_info,arg1,_exception) :DestroyImage(_draw_info->fill_pattern); break; } if (LocaleCompare("tile-offset",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. ??? */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("transparent-color",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! _image_info only used for generating new images. SyncImageSettings() used to set per-image attribute. Note that +transparent-color, means fall-back to image attribute so ImageOption is deleted, not set to a default. 
*/ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption("none"),AllCompliance, &_image_info->transparent_color,_exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _quantize_info->tree_depth=StringToUnsignedLong(ArgOption("0")); break; } if (LocaleCompare("type",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ parse=ParseCommandOption(MagickTypeOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageType", option,arg1); _image_info->type=(ImageType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption("none"),AllCompliance, &_draw_info->undercolor,_exception); break; } if (LocaleCompare("units",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. Should this effect _draw_info X and Y resolution? FUTURE: this probably should be part of the density setting */ parse=ParseCommandOption(MagickResolutionOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedUnitsType", option,arg1); _image_info->units=(ResolutionType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { /* FUTURE: Remember all options become image artifacts _image_info->verbose is only used by coders. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); _image_info->verbose= ArgBoolean; _image_info->ping=MagickFalse; /* verbose can't be a ping */ break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. This is VERY deep in the image caching structure. */ parse=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedVirtualPixelMethod", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'w': { if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,arg1); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(arg1); _draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-point",option+1) == 0) { /* Used as a image chromaticity setting SyncImageSettings() used to set per-image attribute. 
*/ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if ((arg1 && arg1n) && (arg1 != arg1n )) arg1=DestroyString((char *) arg1); if ((arg2 && arg2n) && (arg2 != arg2n )) arg2=DestroyString((char *) arg2); #undef _image_info #undef _exception #undef _draw_info #undef _quantize_info #undef IfSetOption #undef ArgBoolean #undef ArgBooleanNot #undef ArgBooleanString #undef ArgOption return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I S i m p l e O p e r a t o r I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLISimpleOperatorImages() applys one simple image operation given to all % the images in the CLI wand, using any per-image or global settings that was % previously saved in the CLI wand. % % It is assumed that any such settings are up-to-date. % % The format of the WandSimpleOperatorImages method is: % % MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,const char *option, % const char *arg1, const char *arg2,ExceptionInfo *exception) % % A description of each parameter follows: % % o cli_wand: structure holding settings and images to be operated on % % o option: The option string for the operation % % o arg1, arg2: optional argument strings to the operation % */ /* CLISimpleOperatorImage() is an Internal subrountine to apply one simple image operation to the current image pointed to by the CLI wand. The image in the list may be modified in three different ways... * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) * one image replace by a list of images (-separate and -crop only!) In each case the result replaces the single original image in the list, as well as the pointer to the modified image (last image added if replaced by a list of images) is returned. As the image pointed to may be replaced, the first image in the list may also change. GetFirstImageInList() should be used by caller if they wish return the Image pointer to the first image in list. */ static MagickBooleanType CLISimpleOperatorImage(MagickCLI *cli_wand, const char *option, const char *arg1n, const char *arg2n, ExceptionInfo *exception) { Image * new_image; GeometryInfo geometry_info; RectangleInfo geometry; MagickStatusType flags; ssize_t parse; const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _image (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') #define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse #define IsPlusOp IfNormalOp ? 
MagickFalse : MagickTrue assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(_image != (Image *) NULL); /* an image must be present */ if (cli_wand->wand.debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",cli_wand->wand.name); arg1 = arg1n, arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type #if 0 (void) FormatLocaleFile(stderr, "CLISimpleOperatorImage: \"%s\" \"%s\" \"%s\"\n",option,arg1,arg2); #endif new_image = (Image *) NULL; /* the replacement image, if not null at end */ SetGeometryInfo(&geometry_info); switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=AdaptiveBlurImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=AdaptiveResizeImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=AdaptiveSharpenImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("alpha",option+1) == 0) { parse=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedAlphaChannelOption", option,arg1); (void) SetImageAlphaChannel(_image,(AlphaChannelOption) parse, _exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char geometry[MagickPathExtent]; SetGeometryInfo(&geometry_info); flags=ParseGeometry(arg1,&geometry_info); if (flags == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) CloneString(&_draw_info->text,arg2); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&_draw_info->geometry,geometry); _draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); _draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); _draw_info->affine.ry=(-sin(DegreesToRadians( 
fmod(geometry_info.sigma,360.0)))); _draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(_image,_draw_info,_exception); GetAffineMatrix(&_draw_info->affine); break; } if (LocaleCompare("auto-gamma",option+1) == 0) { (void) AutoGammaImage(_image,_exception); break; } if (LocaleCompare("auto-level",option+1) == 0) { (void) AutoLevelImage(_image,_exception); break; } if (LocaleCompare("auto-orient",option+1) == 0) { new_image=AutoOrientImage(_image,_image->orientation,_exception); break; } if (LocaleCompare("auto-threshold",option+1) == 0) { AutoThresholdMethod method; method=(AutoThresholdMethod) ParseCommandOption( MagickAutoThresholdOptions,MagickFalse,arg1); (void) AutoThresholdImage(_image,method,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) BlackThresholdImage(_image,arg1,_exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { geometry_info.rho=1.5; if (IfNormalOp) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); } new_image=BlueShiftImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=BlurImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("border",option+1) == 0) { CompositeOperator compose; const char* value; flags=ParsePageGeometry(_image,arg1,&geometry,_exception); if ((flags & (WidthValue | HeightValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); compose=OverCompositeOp; value=GetImageOption(_image_info,"compose"); if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); new_image=BorderImage(_image,&geometry,compose,_exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImage(_image,brightness,contrast, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("canny",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=10; if ((flags & PsiValue) == 0) geometry_info.psi=30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } new_image=CannyEdgeImage(_image,geometry_info.rho,geometry_info.sigma, geometry_info.xi,geometry_info.psi,_exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Note: arguments do not have percent escapes expanded */ /* Color correct with a color decision list. 
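The argument names a color correction collection file; it is read verbatim with FileToString() and applied by ColorDecisionListImage(), e.g. "magick input.png -cdl correction.ccc output.png".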
*/ color_correction_collection=FileToString(arg1,~0UL,_exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(_image,color_correction_collection, _exception); break; } if (LocaleCompare("channel",option+1) == 0) { if (IfPlusOp) { (void) SetPixelChannelMask(_image,DefaultChannels); break; } parse=ParseChannelOption(arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedChannelType",option, arg1); (void) SetPixelChannelMask(_image,(ChannelType) parse); break; } if (LocaleCompare("charcoal",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; new_image=CharcoalImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("chop",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseGravityGeometry(_image,arg1,&geometry,_exception); new_image=ChopImage(_image,&geometry,_exception); break; } if (LocaleCompare("clahe",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGeometry(arg1,&geometry_info); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); (void) CLAHEImage(_image,geometry.width,geometry.height, (size_t) geometry.x,geometry_info.psi,_exception); break; } if (LocaleCompare("clamp",option+1) == 0) { (void) ClampImage(_image,_exception); break; } if (LocaleCompare("clip",option+1) == 0) { if (IfNormalOp) (void) ClipImage(_image,_exception); else /* "+mask" remove the write mask */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { Image *clip_mask; if (IfPlusOp) { /* use "+clip-mask" Remove the write mask for -clip-path */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL,_exception); break; } clip_mask=GetImageCache(_image_info,arg1,_exception); if (clip_mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,clip_mask,_exception); clip_mask=DestroyImage(clip_mask); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) ClipImagePath(_image,arg1,IsNormalOp,_exception); /* Note: Use "+clip-mask" remove the write mask added */ break; } if (LocaleCompare("colorize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ColorizeImage(_image,arg1,&_draw_info->fill,_exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; kernel=AcquireKernelInfo(arg1,exception); if (kernel == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ColorMatrixImage(_image,kernel,_exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. 
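If the image is already colormapped with no more colors than requested, the colormap is simply compressed rather than re-quantized (see the DirectClass test below).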
FUTURE: also provide 'plus version with image 'color counts' */ _quantize_info->number_colors=StringToUnsignedLong(arg1); if (_quantize_info->number_colors == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((_image->storage_class == DirectClass) || _image->colors > _quantize_info->number_colors) (void) QuantizeImage(_quantize_info,_image,_exception); else (void) CompressImageColormap(_image,_exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { /* WARNING: this is both a image_info setting (already done) and a operator to change image colorspace. FUTURE: default colorspace should be sRGB! Unless some type of 'linear colorspace' mode is set. Note that +colorspace sets "undefined" or no effect on new images, but forces images already in memory back to RGB! That seems to be a little strange! */ (void) TransformImageColorspace(_image, IfNormalOp ? _image_info->colorspace : sRGBColorspace, _exception); break; } if (LocaleCompare("connected-components",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ConnectedComponentsImage(_image,(size_t) StringToInteger(arg1),(CCObjectInfo **) NULL,_exception); break; } if (LocaleCompare("contrast",option+1) == 0) { CLIWandWarnReplaced(IfNormalOp?"-level":"+level"); (void) ContrastImage(_image,IsNormalOp,_exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) _image->columns*_image->rows/100.0; white_point*=(double) _image->columns*_image->rows/100.0; } white_point=(double) _image->columns*_image->rows-white_point; (void) ContrastStretchImage(_image,black_point,white_point, _exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; kernel_info=AcquireKernelInfo(arg1,exception); if (kernel_info == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); gamma=0.0; for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++) kernel_info->values[j]*=gamma; new_image=MorphologyImage(_image,CorrelateMorphology,1,kernel_info, _exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* WARNING: This can generate multiple images! 
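A tile geometry (a size with no offset) makes CropImageToTiles() return a whole list of tile images rather than a single cropped image.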
*/ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=CropImageToTiles(_image,arg1,_exception); break; } if (LocaleCompare("cycle",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) CycleColormapImage(_image,(ssize_t) StringToLong(arg1), _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ StringInfo *passkey; passkey=FileToStringInfo(arg1,~0UL,_exception); if (passkey == (StringInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) PasskeyDecipherImage(_image,passkey,_exception); passkey=DestroyStringInfo(passkey); break; } if (LocaleCompare("depth",option+1) == 0) { /* The _image_info->depth setting has already been set We just need to apply it to all images in current sequence WARNING: Depth from 8 to 16 causes 'quantum rounding to images! That is it really is an operation, not a setting! Arrgghhh FUTURE: this should not be an operator!!! */ (void) SetImageDepth(_image,_image_info->depth,_exception); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); } else threshold=40.0*QuantumRange/100.0; new_image=DeskewImage(_image,threshold,_exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { new_image=DespeckleImage(_image,_exception); break; } if (LocaleCompare("distort",option+1) == 0) { double *args; ssize_t count; parse = ParseCommandOption(MagickDistortOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedDistortMethod", option,arg1); if ((DistortMethod) parse == ResizeDistortion) { double resize_args[2]; /* Special Case - Argument is actually a resize geometry! ** Convert that to an appropriate distortion argument array. 
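** e.g. "-distort Resize 50%" resizes via the distort pipeline, much like "-resize 50%".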
** FUTURE: make a separate special resize operator Roll into a resize special operator */ if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidGeometry", option,arg2); (void) ParseRegionGeometry(_image,arg2,&geometry,_exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; new_image=DistortImage(_image,(DistortMethod) parse, (size_t)2,resize_args,MagickTrue,_exception); break; } /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg2,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2); new_image=DistortImage(_image,(DistortMethod) parse,(size_t) count,args,IsPlusOp,_exception); args=(double *) RelinquishMagickMemory(args); break; } if (LocaleCompare("draw",option+1) == 0) { (void) CloneString(&_draw_info->primitive,arg1); (void) DrawImage(_image,_draw_info,_exception); (void) CloneString(&_draw_info->primitive,(char *) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("edge",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=EdgeImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("emboss",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=EmbossImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("encipher",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ StringInfo *passkey; passkey=FileToStringInfo(arg1,~0UL,_exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(_image,passkey,_exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("enhance",option+1) == 0) { new_image=EnhanceImage(_image,_exception); break; } if (LocaleCompare("equalize",option+1) == 0) { (void) EqualizeImage(_image,_exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; parse = ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); constant=StringToDoubleInterval(arg2,(double) QuantumRange+1.0); (void) EvaluateImage(_image,(MagickEvaluateOperator)parse,constant, _exception); break; } if (LocaleCompare("extent",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGravityGeometry(_image,arg1,&geometry,_exception); if (geometry.width == 0) geometry.width=_image->columns; if (geometry.height == 0) geometry.height=_image->rows; new_image=ExtentImage(_image,&geometry,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("flip",option+1) == 0) { new_image=FlipImage(_image,_exception); break; } if (LocaleCompare("flop",option+1) == 0) { new_image=FlopImage(_image,_exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { PixelInfo target; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) 
ParsePageGeometry(_image,arg1,&geometry,_exception); (void) QueryColorCompliance(arg2,AllCompliance,&target,_exception); (void) FloodfillPaintImage(_image,_draw_info,&target,geometry.x, geometry.y,IsPlusOp,_exception); break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; CompositeOperator compose; const char* value; value=GetImageOption(_image_info,"compose"); compose=OverCompositeOp; /* use Over not _image->compose */ if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=_image->columns+2*frame_info.width; frame_info.height=_image->rows+2*frame_info.height; new_image=FrameImage(_image,&frame_info,compose,_exception); break; } if (LocaleCompare("function",option+1) == 0) { double *args; ssize_t count; parse=ParseCommandOption(MagickFunctionOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction", option,arg1); /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg2,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2); (void) FunctionImage(_image,(MagickFunction)parse,(size_t) count,args, _exception); args=(double *) RelinquishMagickMemory(args); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { double constant; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); constant=StringToDouble(arg1,(char **) NULL); #if 0 /* Using Gamma, via a cache */ if (IfPlusOp) constant=PerceptibleReciprocal(constant); (void) GammaImage(_image,constant,_exception); #else /* Using Evaluate POW, direct update of values - more accurate */ if (IfNormalOp) constant=PerceptibleReciprocal(constant); (void) EvaluateImage(_image,PowEvaluateOperator,constant,_exception); _image->gamma*=StringToDouble(arg1,(char **) NULL); #endif /* Set gamma setting -- Old meaning of "+gamma" * _image->gamma=StringToDouble(arg1,(char **) NULL); */ break; } if (LocaleCompare("gaussian-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=GaussianBlurImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("gaussian",option+1) == 0) { CLIWandWarnReplaced("-gaussian-blur"); (void) CLISimpleOperatorImage(cli_wand,"-gaussian-blur",arg1,NULL,exception); break; /* stop here; falling through would raise UnrecognizedOption */ } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset for composition. (A Setting) Resize last _image. (ListOperator) -- DEPRECATE FUTURE: Why if no 'offset' does this resize ALL images? Also why is the setting recorded in the IMAGE non-sense! */ if (IfPlusOp) { /* remove the previous composition geometry offset!
*/ if (_image->geometry != (char *) NULL) _image->geometry=DestroyString(_image->geometry); break; } if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&_image->geometry,arg1); else new_image=ResizeImage(_image,geometry.width,geometry.height, _image->filter,_exception); break; } if (LocaleCompare("grayscale",option+1) == 0) { parse=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityMethod", option,arg1); (void) GrayscaleImage(_image,(PixelIntensityMethod) parse,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("hough-lines",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; new_image=HoughLineImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("identify",option+1) == 0) { const char *format, *text; format=GetImageOption(_image_info,"format"); if (format == (char *) NULL) { (void) IdentifyImage(_image,stdout,_image_info->verbose, _exception); break; } text=InterpretImageProperties(_image_info,_image,format,_exception); if (text == (char *) NULL) CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); (void) fputs(text,stdout); text=DestroyString((char *)text); break; } if (LocaleCompare("implode",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ImplodeImage(_image,geometry_info.rho,_image->interpolate, _exception); break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* FUTURE: New to IMv7 Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=InterpolativeResizeImage(_image,geometry.width, geometry.height,_image->interpolate,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'k': { if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
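A missing sigma defaults to rho-0.5, as set below.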
*/ flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; new_image=KuwaharaImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("lat",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=AdaptiveThresholdImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(double) geometry_info.xi, _exception); break; } if (LocaleCompare("level",option+1) == 0) { double black_point, gamma, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) (QuantumRange/100.0); white_point*=(double) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if (IfPlusOp || ((flags & AspectValue) != 0)) (void) LevelizeImage(_image,black_point,white_point,gamma,_exception); else (void) LevelImage(_image,black_point,white_point,gamma,_exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MagickPathExtent]; const char *p; PixelInfo black_point, white_point; p=(const char *) arg1; GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &black_point,_exception); else (void) QueryColorCompliance("#000000",AllCompliance, &black_point,_exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. 
*/ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &white_point,_exception); else (void) QueryColorCompliance("#ffffff",AllCompliance, &white_point,_exception); } (void) LevelImageColors(_image,&black_point,&white_point, IsPlusOp,_exception); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(double) _image->columns*_image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) _image->columns*_image->rows/100.0; white_point*=(double) _image->columns*_image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) _image->columns*_image->rows- black_point; (void) LinearStretchImage(_image,black_point,white_point,_exception); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; new_image=LiquidRescaleImage(_image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,_exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; new_image=LocalContrastImage(_image,geometry_info.rho, geometry_info.sigma,exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { new_image=MagnifyImage(_image,_exception); break; } if (LocaleCompare("map",option+1) == 0) { CLIWandWarnReplaced("-remap"); (void) CLISimpleOperatorImage(cli_wand,"-remap",NULL,NULL,exception); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. */ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,mask,_exception); mask=DestroyImage(mask); break; } if (LocaleCompare("matte",option+1) == 0) { CLIWandWarnReplaced(IfNormalOp?"-alpha Set":"-alpha Off"); (void) SetImageAlphaChannel(_image,IfNormalOp ? 
SetAlphaChannel : DeactivateAlphaChannel, _exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=MeanShiftImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("median",option+1) == 0) { CLIWandWarnReplaced("-statistic Median"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","Median",arg1,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* FUTURE: note this is also a special "montage" option */ CLIWandWarnReplaced("-statistic Mode"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","Mode",arg1,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ModulateImage(_image,arg1,_exception); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageProgressMonitor(_image, IfNormalOp ? MonitorProgress : (MagickProgressMonitor) NULL,(void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SetImageType(_image,BilevelType,_exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; const char *p; KernelInfo *kernel; ssize_t iterations; p=arg1; GetNextToken(p,&p,MagickPathExtent,token); parse=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",option, arg1); iterations=1L; GetNextToken(p,&p,MagickPathExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MagickPathExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(arg2,exception); if (kernel == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"UnabletoParseKernel",option,arg2); new_image=MorphologyImage(_image,(MorphologyMethod)parse,iterations, kernel,_exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=MotionBlurImage(_image,geometry_info.rho,geometry_info.sigma, geometry_info.xi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) NegateImage(_image, IsPlusOp, _exception); break; } if (LocaleCompare("noise",option+1) == 0) { double attenuate; const char* value; if (IfNormalOp) { CLIWandWarnReplaced("-statistic NonPeak"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","NonPeak",arg1,exception); break; } parse=ParseCommandOption(MagickNoiseOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedNoiseType", option,arg1); attenuate=1.0; value=GetImageOption(_image_info,"attenuate"); if (value != (const char *) NULL) attenuate=StringToDouble(value,(char **) NULL); new_image=AddNoiseImage(_image,(NoiseType)parse,attenuate, _exception); break; } if (LocaleCompare("normalize",option+1) == 0) { (void) NormalizeImage(_image,_exception); 
break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { PixelInfo target; (void) QueryColorCompliance(arg1,AllCompliance,&target,_exception); (void) OpaquePaintImage(_image,&target,&_draw_info->fill,IsPlusOp, _exception); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) OrderedDitherImage(_image,arg1,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("paint",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=OilPaintImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { (void) PerceptibleImage(_image,StringToDouble(arg1,(char **) NULL), _exception); break; } if (LocaleCompare("polaroid",option+1) == 0) { const char *caption; double angle; if (IfPlusOp) { RandomInfo *random_info; random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); } else { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); angle=geometry_info.rho; } caption=GetImageProperty(_image,"caption",_exception); new_image=PolaroidImage(_image,_draw_info,caption,angle, _image->interpolate,_exception); break; } if (LocaleCompare("posterize",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) PosterizeImage(_image,(size_t) geometry_info.rho, _quantize_info->dither_method,_exception); break; } if (LocaleCompare("preview",option+1) == 0) { /* FUTURE: should be a 'Genesis' option? Option however is also in WandSettingOptionInfo() Why??? */ parse=ParseCommandOption(MagickPreviewOptions, MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedPreviewType", option,arg1); new_image=PreviewImage(_image,(PreviewType)parse,_exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; /* Note: arguments do not have percent escapes expanded */ if (IfPlusOp) { /* Remove a profile from the _image. */ (void) ProfileImage(_image,arg1,(const unsigned char *) NULL,0,_exception); break; } /* Associate a profile with the _image. 
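The argument is first read as an image whose embedded profiles are copied into _image; if that fails, it is read as a raw profile file (see the FileToStringInfo() fallback below).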
*/ profile_info=CloneImageInfo(_image_info); profile=GetImageProfile(_image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,arg1,_exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *profile; profile_info=CloneImageInfo(_image_info); (void) CopyMagickString(profile_info->filename,arg1, MagickPathExtent); profile=FileToStringInfo(profile_info->filename,~0UL,_exception); if (profile != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,_exception); (void) ProfileImage(_image,profile_info->magick, GetStringInfoDatum(profile),(size_t) GetStringInfoLength(profile),_exception); profile=DestroyStringInfo(profile); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(_image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),_exception); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("raise",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); (void) RaiseImage(_image,&geometry,IsNormalOp,_exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { double min_threshold, max_threshold; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); min_threshold=0.0; max_threshold=(double) QuantumRange; flags=ParseGeometry(arg1,&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(arg1,'%') != (char *) NULL) { max_threshold*=(double) (0.01*QuantumRange); min_threshold*=(double) (0.01*QuantumRange); } (void) RandomThresholdImage(_image,min_threshold,max_threshold, _exception); break; } if (LocaleCompare("range-threshold",option+1) == 0) { /* Range threshold image. */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGeometry(arg1,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=geometry_info.sigma; if ((flags & PsiValue) == 0) geometry_info.psi=geometry_info.xi; if (strchr(arg1,'%') != (char *) NULL) { geometry_info.rho*=(double) (0.01*QuantumRange); geometry_info.sigma*=(double) (0.01*QuantumRange); geometry_info.xi*=(double) (0.01*QuantumRange); geometry_info.psi*=(double) (0.01*QuantumRange); } (void) RangeThresholdImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("read-mask",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,ReadPixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. 
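The mask image is read from the given filename, attached as the read mask, and the local copy destroyed.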
*/ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,ReadPixelMask,mask,_exception); mask=DestroyImage(mask); break; } if (LocaleCompare("recolor",option+1) == 0) { CLIWandWarnReplaced("-color-matrix"); (void) CLISimpleOperatorImage(cli_wand,"-color-matrix",arg1,NULL, exception); } if (LocaleCompare("region",option+1) == 0) { if (*option == '+') { (void) SetImageRegionMask(_image,WritePixelMask, (const RectangleInfo *) NULL,_exception); break; } if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseGravityGeometry(_image,arg1,&geometry,_exception); (void) SetImageRegionMask(_image,WritePixelMask,&geometry,_exception); break; } if (LocaleCompare("remap",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *remap_image; remap_image=GetImageCache(_image_info,arg1,_exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(_quantize_info,_image,remap_image,_exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option, arg1); (void) ResetImagePage(_image,arg1); } else (void) ParseAbsoluteGeometry("0x0+0+0",&_image->page); break; } if (LocaleCompare("resample",option+1) == 0) { /* FUTURE: Roll into a resize special operation */ flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=ResampleImage(_image,geometry_info.rho, geometry_info.sigma,_image->filter,_exception); break; } if (LocaleCompare("resize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=ResizeImage(_image,geometry.width,geometry.height, _image->filter,_exception); break; } if (LocaleCompare("roll",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) _image->columns/100.0; geometry.y*=(double) _image->rows/100.0; } new_image=RollImage(_image,geometry.x,geometry.y,_exception); break; } if (LocaleCompare("rotate",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & GreaterValue) != 0 && (_image->columns <= _image->rows)) break; if ((flags & LessValue) != 0 && (_image->columns >= _image->rows)) break; new_image=RotateImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("rotational-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=RotationalBlurImage(_image,geometry_info.rho,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); 
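/* resize by point sampling only; no interpolation, so no new colors are introduced */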
new_image=SampleImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=ScaleImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("segment",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(_image,_image->colorspace, _image_info->verbose,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=SelectiveBlurImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* WARNING: This can generate multiple images! */ /* FUTURE - this may be replaced by a "-channel" method */ new_image=SeparateImages(_image,_exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=SepiaToneImage(_image,StringToDoubleInterval(arg1, (double) QuantumRange+1.0),_exception); break; } if (LocaleCompare("shade",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if (((flags & RhoValue) == 0) || ((flags & SigmaValue) == 0)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ShadeImage(_image,IsNormalOp,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("shadow",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; new_image=ShadowImage(_image,geometry_info.rho,geometry_info.sigma, (ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),_exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; new_image=SharpenImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("shave",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); new_image=ShaveImage(_image,&geometry,_exception); break; } if (LocaleCompare("shear",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=ShearImage(_image,geometry_info.rho,geometry_info.sigma, 
_exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImage(_image,IsNormalOp,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("sketch",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=SketchImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("solarize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SolarizeImage(_image,StringToDoubleInterval(arg1,(double) QuantumRange+1.0),_exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { parse= ParseCommandOption(MagickSparseColorOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedSparseColorMethod", option,arg1); new_image=SparseColorOption(_image,(SparseColorMethod)parse,arg2, _exception); break; } if (LocaleCompare("splice",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGravityGeometry(_image,arg1,&geometry,_exception); new_image=SpliceImage(_image,&geometry,_exception); break; } if (LocaleCompare("spread",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); new_image=SpreadImage(_image,_image->interpolate,geometry_info.rho, _exception); break; } if (LocaleCompare("statistic",option+1) == 0) { parse=ParseCommandOption(MagickStatisticOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedStatisticType", option,arg1); flags=ParseGeometry(arg2,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=StatisticImage(_image,(StatisticType)parse, (size_t) geometry_info.rho,(size_t) geometry_info.sigma, _exception); break; } if (LocaleCompare("strip",option+1) == 0) { (void) StripImage(_image,_exception); break; } if (LocaleCompare("swirl",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=SwirlImage(_image,geometry_info.rho, _image->interpolate,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; threshold=(double) QuantumRange/2; if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); } (void) BilevelImage(_image,threshold,_exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); 
new_image=ThumbnailImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("tint",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=TintImage(_image,arg1,&_draw_info->fill,_exception); break; } if (LocaleCompare("transform",option+1) == 0) { CLIWandWarnReplaced("+distort AffineProjection"); new_image=AffineTransformImage(_image,&_draw_info->affine,_exception); break; } if (LocaleCompare("transparent",option+1) == 0) { PixelInfo target; (void) QueryColorCompliance(arg1,AllCompliance,&target,_exception); (void) TransparentPaintImage(_image,&target,(Quantum) TransparentAlpha,IsPlusOp,_exception); break; } if (LocaleCompare("transpose",option+1) == 0) { new_image=TransposeImage(_image,_exception); break; } if (LocaleCompare("transverse",option+1) == 0) { new_image=TransverseImage(_image,_exception); break; } if (LocaleCompare("trim",option+1) == 0) { new_image=TrimImage(_image,_exception); break; } if (LocaleCompare("type",option+1) == 0) { /* Note that "type" setting should have already been defined */ (void) SetImageType(_image,_image_info->type,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'u': { if (LocaleCompare("unique",option+1) == 0) { /* FUTURE: move to SyncImageSettings() and AcqireImage()??? Option is not documented, bt appears to be for "identify". We may need a identify specific verbose! */ if (IsPlusOp) { (void) DeleteImageArtifact(_image,"identify:unique-colors"); break; } (void) SetImageArtifact(_image,"identify:unique-colors","true"); (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { new_image=UniqueImageColors(_image,_exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; new_image=UnsharpMaskImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { /* FUTURE: move to SyncImageSettings() and AcquireImage()??? three places! ImageArtifact ImageOption _image_info->verbose Some how new images also get this artifact! */ (void) SetImageArtifact(_image,option+1, IfNormalOp ? 
"true" : "false" ); break; } if (LocaleCompare("vignette",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*_image->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*_image->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) _image->columns/100.0; geometry_info.psi*=(double) _image->rows/100.0; } new_image=VignetteImage(_image,geometry_info.rho,geometry_info.sigma, (ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'w': { if (LocaleCompare("wave",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=WaveImage(_image,geometry_info.rho,geometry_info.sigma, _image->interpolate,_exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; new_image=WaveletDenoiseImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) WhiteThresholdImage(_image,arg1,_exception); break; } if (LocaleCompare("write-mask",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. 
*/ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,mask,_exception); mask=DestroyImage(mask); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); /* Replace current image with any image that was generated and set image point to last image (so image->next is correct) */ if (new_image != (Image *) NULL) ReplaceImageInListReturnLast(&_image,new_image); return(MagickTrue); #undef _image_info #undef _draw_info #undef _quantize_info #undef _image #undef _exception #undef IfNormalOp #undef IfPlusOp #undef IsNormalOp #undef IsPlusOp } WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception) { #if !USE_WAND_METHODS size_t n, i; #endif assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Simple Operator: %s \"%s\" \"%s\"", option,arg1,arg2); #if !USE_WAND_METHODS /* FUTURE add appropriate tracing */ i=0; n=GetImageListLength(cli_wand->wand.images); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); while (1) { i++; CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); if ( cli_wand->wand.images->next == (Image *) NULL ) break; cli_wand->wand.images=cli_wand->wand.images->next; } assert( i == n ); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); #else MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); MagickResetIterator(&cli_wand->wand); #endif return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I L i s t O p e r a t o r I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLIListOperatorImages() applies a single operation that is apply to the % entire image list as a whole. The result is often a complete replacment % of the image list with a completely new list, or with just a single image % result. 
% % The format of the MogrifyImage method is: % % MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand, % const char *option,const char *arg1,const char *arg2) % % A description of each parameter follows: % % o cli_wand: structure holding settings to be applied % % o option: The option string for the operation % % o arg1, arg2: optional argument strings to the operation % arg2 is currently not used % */ WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1n,const char *arg2n) { const char /* percent escaped versions of the args */ *arg1, *arg2; Image *new_images; MagickStatusType status; ssize_t parse; #define _image_info (cli_wand->wand.image_info) #define _images (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') #define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(_images != (Image *) NULL); /* _images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- List Operator: %s \"%s\" \"%s\"", option, arg1n == (const char *) NULL ? "null" : arg1n, arg2n == (const char *) NULL ? "null" : arg2n); arg1 = arg1n; arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type status=MagickTrue; new_images=NewImageList(); switch (*(option+1)) { case 'a': { if (LocaleCompare("append",option+1) == 0) { new_images=AppendImages(_images,IsNormalOp,_exception); break; } if (LocaleCompare("average",option+1) == 0) { CLIWandWarnReplaced("-evaluate-sequence Mean"); (void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean", NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("channel-fx",option+1) == 0) { new_images=ChannelFxImage(_images,arg1,_exception); break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image; /* FUTURE - make this a compose option, and thus can be used with layers compose or even compose last image over all other _images. 
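             As currently coded the first image in the list becomes the
             target and the last image the colour lookup table; ClutImage()
             then recolours the target by indexing into that CLUT image,
             using the target's own interpolation method.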
*/ new_images=RemoveFirstImageFromList(&_images); clut_image=RemoveLastImageFromList(&_images); /* FUTURE - produce Exception, rather than silent fail */ if (clut_image == (Image *) NULL) break; (void) ClutImage(new_images,clut_image,new_images->interpolate, _exception); clut_image=DestroyImage(clut_image); break; } if (LocaleCompare("coalesce",option+1) == 0) { new_images=CoalesceImages(_images,_exception); break; } if (LocaleCompare("combine",option+1) == 0) { parse=(ssize_t) _images->colorspace; if (_images->number_channels < GetImageListLength(_images)) parse=sRGBColorspace; if ( IfPlusOp ) parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option, arg1); new_images=CombineImages(_images,(ColorspaceType) parse,_exception); break; } if (LocaleCompare("compare",option+1) == 0) { double distortion; Image *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. */ image=RemoveFirstImageFromList(&_images); reconstruct_image=RemoveFirstImageFromList(&_images); /* FUTURE - produce Exception, rather than silent fail */ if (reconstruct_image == (Image *) NULL) { image=DestroyImage(image); break; } metric=UndefinedErrorMetric; option=GetImageOption(_image_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); new_images=CompareImages(image,reconstruct_image,metric,&distortion, _exception); (void) distortion; reconstruct_image=DestroyImage(reconstruct_image); image=DestroyImage(image); break; } if (LocaleCompare("complex",option+1) == 0) { parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); new_images=ComplexImages(_images,(ComplexOperator) parse,_exception); break; } if (LocaleCompare("composite",option+1) == 0) { CompositeOperator compose; const char* value; MagickBooleanType clip_to_self; Image *mask_image, *source_image; RectangleInfo geometry; /* Compose value from "-compose" option only */ value=GetImageOption(_image_info,"compose"); if (value == (const char *) NULL) compose=OverCompositeOp; /* use Over not source_image->compose */ else compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); /* Get "clip-to-self" expert setting (false is normal) */ clip_to_self=GetCompositeClipToSelf(compose); value=GetImageOption(_image_info,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsStringTrue(value); value=GetImageOption(_image_info,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsStringFalse(value); /* deprecated */ new_images=RemoveFirstImageFromList(&_images); source_image=RemoveFirstImageFromList(&_images); if (source_image == (Image *) NULL) break; /* FUTURE - produce Exception, rather than silent fail */ /* FUTURE - this should not be here! 
- should be part of -geometry */ if (source_image->geometry != (char *) NULL) { RectangleInfo resize_geometry; (void) ParseRegionGeometry(source_image,source_image->geometry, &resize_geometry,_exception); if ((source_image->columns != resize_geometry.width) || (source_image->rows != resize_geometry.height)) { Image *resize_image; resize_image=ResizeImage(source_image,resize_geometry.width, resize_geometry.height,source_image->filter,_exception); if (resize_image != (Image *) NULL) { source_image=DestroyImage(source_image); source_image=resize_image; } } } SetGeometry(source_image,&geometry); (void) ParseAbsoluteGeometry(source_image->geometry,&geometry); GravityAdjustGeometry(new_images->columns,new_images->rows, new_images->gravity, &geometry); mask_image=RemoveFirstImageFromList(&_images); if (mask_image == (Image *) NULL) status&=CompositeImage(new_images,source_image,compose,clip_to_self, geometry.x,geometry.y,_exception); else { if ((compose == DisplaceCompositeOp) || (compose == DistortCompositeOp)) { status&=CompositeImage(source_image,mask_image, CopyGreenCompositeOp,MagickTrue,0,0,_exception); status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,_exception); } else { Image *clone_image; clone_image=CloneImage(new_images,0,0,MagickTrue,_exception); if (clone_image == (Image *) NULL) break; status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,_exception); status&=CompositeImage(new_images,mask_image, CopyAlphaCompositeOp,MagickTrue,0,0,_exception); status&=CompositeImage(clone_image,new_images,OverCompositeOp, clip_to_self,0,0,_exception); new_images=DestroyImage(new_images); new_images=clone_image; } mask_image=DestroyImage(mask_image); } source_image=DestroyImage(source_image); break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
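             arg1 supplies the source region geometry and arg2 the
             destination offset; pixels are copied into the first image from
             the next image in the list (or from the first image itself if
             it is the only one) using CopyImagePixels().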
*/ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParsePageGeometry(_images,arg2,&geometry,_exception); offset.x=geometry.x; offset.y=geometry.y; source_image=_images; if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,arg1,&geometry,_exception); (void) CopyImagePixels(_images,source_image,&geometry,&offset, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { CLIWandWarnReplaced("-layer CompareAny"); (void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL); break; } if (LocaleCompare("delete",option+1) == 0) { if (IfNormalOp) DeleteImages(&_images,arg1,_exception); else DeleteImages(&_images,"-1",_exception); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (IfNormalOp) { const char *p; size_t number_duplicates; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option, arg1); number_duplicates=(size_t) StringToLong(arg1); p=strchr(arg1,','); if (p == (const char *) NULL) new_images=DuplicateImages(_images,number_duplicates,"-1", _exception); else new_images=DuplicateImages(_images,number_duplicates,p, _exception); } else new_images=DuplicateImages(_images,1,"-1",_exception); AppendImageToList(&_images, new_images); new_images=(Image *) NULL; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("fft",option+1) == 0) { new_images=ForwardFourierTransformImage(_images,IsNormalOp, _exception); break; } if (LocaleCompare("flatten",option+1) == 0) { /* REDIRECTED to use -layers flatten instead */ (void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL); break; } if (LocaleCompare("fx",option+1) == 0) { new_images=FxImage(_images,arg1,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { /* FUTURE - make this a compose option (and thus layers compose ) or perhaps compose last image over all other _images. 
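             At present the first image is taken as the target and the last
             image in the list as the Hald colour lookup table, which
             HaldClutImage() then applies to the target.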
*/ Image *hald_image; new_images=RemoveFirstImageFromList(&_images); hald_image=RemoveLastImageFromList(&_images); if (hald_image == (Image *) NULL) break; (void) HaldClutImage(new_images,hald_image,_exception); hald_image=DestroyImage(hald_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *magnitude_image, *phase_image; magnitude_image=RemoveFirstImageFromList(&_images); phase_image=RemoveFirstImageFromList(&_images); /* FUTURE - produce Exception, rather than silent fail */ if (phase_image == (Image *) NULL) break; new_images=InverseFourierTransformImage(magnitude_image,phase_image, IsNormalOp,_exception); magnitude_image=DestroyImage(magnitude_image); phase_image=DestroyImage(phase_image); break; } if (LocaleCompare("insert",option+1) == 0) { Image *insert_image, *index_image; ssize_t index; if (IfNormalOp && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); index=0; insert_image=RemoveLastImageFromList(&_images); if (IfNormalOp) index=(ssize_t) StringToLong(arg1); index_image=insert_image; if (index == 0) PrependImageToList(&_images,insert_image); else if (index == (ssize_t) GetImageListLength(_images)) AppendImageToList(&_images,insert_image); else { index_image=GetImageFromList(_images,index-1); if (index_image == (Image *) NULL) CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1); InsertImageInList(&index_image,insert_image); } _images=GetFirstImageInList(index_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("layers",option+1) == 0) { parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod", option,arg1); switch ((LayerMethod) parse) { case CoalesceLayer: { new_images=CoalesceImages(_images,_exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { new_images=CompareImagesLayers(_images,(LayerMethod) parse, _exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { new_images=MergeImageLayers(_images,(LayerMethod) parse, _exception); break; } case DisposeLayer: { new_images=DisposeImages(_images,_exception); break; } case OptimizeImageLayer: { new_images=OptimizeImageLayers(_images,_exception); break; } case OptimizePlusLayer: { new_images=OptimizePlusImageLayers(_images,_exception); break; } case OptimizeTransLayer: { OptimizeImageTransparency(_images,_exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(&_images,_exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(&_images,_exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. 
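               The sequence is coalesced, the coalesced frames are then
               re-optimized with OptimizeImageLayers(), inter-frame
               transparency is optimized, and the frames are finally
               remapped to a single shared colour map with RemapImages().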
*/ new_images=CoalesceImages(_images,_exception); if (new_images == (Image *) NULL) break; _images=DestroyImageList(_images); _images=OptimizeImageLayers(new_images,_exception); if (_images == (Image *) NULL) break; new_images=DestroyImageList(new_images); OptimizeImageTransparency(_images,_exception); (void) RemapImages(_quantize_info,_images,(Image *) NULL, _exception); break; } case CompositeLayer: { Image *source; RectangleInfo geometry; CompositeOperator compose; const char* value; value=GetImageOption(_image_info,"compose"); compose=OverCompositeOp; /* Default to Over */ if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,value); /* Split image sequence at the first 'NULL:' image. */ source=_images; while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. */ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(_images,&geometry); (void) ParseAbsoluteGeometry(_images->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry(_images->page.width != 0 ? _images->page.width : _images->columns, _images->page.height != 0 ? _images->page.height : _images->rows,_images->gravity,&geometry); /* Compose the two image sequences together */ CompositeLayers(_images,compose,source,geometry.x,geometry.y, _exception); source=DestroyImageList(source); break; } } break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("map",option+1) == 0) { CLIWandWarnReplaced("+remap"); (void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception); break; } if (LocaleCompare("metric",option+1) == 0) { (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); morph_image=MorphImages(_images,StringToUnsignedLong(arg1), _exception); if (morph_image == (Image *) NULL) break; _images=DestroyImageList(_images); _images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { /* REDIRECTED to use -layers mosaic instead */ (void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("poly",option+1) == 0) { double *args; ssize_t count; /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg1,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1); new_images=PolynomialImage(_images,(size_t) (count >> 1),args, _exception); args=(double *) RelinquishMagickMemory(args); break; } if (LocaleCompare("process",option+1) == 0) { /* FUTURE: better parsing using ScriptToken() from string ??? 
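             The argument is split into an argv-style list with
             StringToArgv(); the old 'filter="-option arg"' form is handled
             by tokenizing on '=', and in either case the named filter
             module is invoked through InvokeDynamicImageFilter().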
*/ char **arguments; int j, number_arguments; arguments=StringToArgv(arg1,&number_arguments); if (arguments == (char **) NULL) break; if (strchr(arguments[1],'=') != (char *) NULL) { char breaker, quote, *token; const char *arguments; int next, status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg1". */ assert(arg1 != (const char *) NULL); length=strlen(arg1); token=(char *) NULL; if (~length >= (MagickPathExtent-1)) token=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; arguments=arg1; token_info=AcquireTokenInfo(); status=Tokenizer(token_info,0,token,length,arguments,"","=", "\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (status == 0) { const char *argv; argv=(&(arguments[next])); (void) InvokeDynamicImageFilter(token,&_images,1,&argv, _exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&_images, number_arguments-2,(const char **) arguments+2,_exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("remap",option+1) == 0) { (void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception); break; } if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(&_images); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("smush",option+1) == 0) { /* FUTURE: this option needs more work to make better */ ssize_t offset; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); offset=(ssize_t) StringToLong(arg1); new_images=SmushImages(_images,IsNormalOp,offset,_exception); break; } if (LocaleCompare("subimage",option+1) == 0) { Image *base_image, *compare_image; const char *value; MetricType metric; double similarity; RectangleInfo offset; base_image=GetImageFromList(_images,0); compare_image=GetImageFromList(_images,1); /* Comparision Metric */ metric=UndefinedErrorMetric; value=GetImageOption(_image_info,"metric"); if (value != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,value); new_images=SimilarityImage(base_image,compare_image,metric,0.0, &offset,&similarity,_exception); if (new_images != (Image *) NULL) { char result[MagickPathExtent]; (void) FormatLocaleString(result,MagickPathExtent,"%lf", similarity); (void) SetImageProperty(new_images,"subimage:similarity",result, _exception); (void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long) offset.x); (void) SetImageProperty(new_images,"subimage:x",result, _exception); (void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long) offset.y); (void) SetImageProperty(new_images,"subimage:y",result, _exception); (void) FormatLocaleString(result,MagickPathExtent, "%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long) offset.height,(long) offset.x,(long) offset.y); (void) SetImageProperty(new_images,"subimage:offset",result, _exception); } break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *swap; ssize_t index, swap_index; index=(-1); swap_index=(-2); if (IfNormalOp) { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) 
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(_images,index); q=GetImageFromList(_images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { if (IfNormalOp) CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1) else CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option); } if (p == q) CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1); swap=CloneImage(p,0,0,MagickTrue,_exception); if (swap == (Image *) NULL) CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed", option,GetExceptionMessage(errno)); ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception)); ReplaceImageInList(&q,swap); _images=GetFirstImageInList(q); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); /* if new image list generated, replace existing image list */ if (new_images == (Image *) NULL) return(status == 0 ? MagickFalse : MagickTrue); _images=DestroyImageList(_images); _images=GetFirstImageInList(new_images); return(status == 0 ? MagickFalse : MagickTrue); #undef _image_info #undef _images #undef _exception #undef _draw_info #undef _quantize_info #undef IfNormalOp #undef IfPlusOp #undef IsNormalOp } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I N o I m a g e O p e r a t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLINoImageOperator() Applies operations that may not actually need images % in an image list. % % The classic operators of this type is "-read", which actually creates % images even when no images are present. Or image stack operators, which % can be applied (push or pop) to an empty image list. % % Note that these operators may involve other special 'option' prefix % characters other than '-' or '+', namely parenthesis and braces. % % The format of the CLINoImageOption method is: % % void CLINoImageOption(MagickCLI *cli_wand,const char *option, % const char *arg1, const char *arg2) % % A description of each parameter follows: % % o cli_wand: the main CLI Wand to use. (sometimes not required) % % o option: The special option (with any switch char) to process % % o arg1 & arg2: Argument for option, if required % Currently arg2 is not used. % */ WandPrivate void CLINoImageOperator(MagickCLI *cli_wand, const char *option,const char *arg1n,const char *arg2n) { const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _images (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- NoImage Operator: %s \"%s\" \"%s\"", option, arg1n != (char *) NULL ? arg1n : "", arg2n != (char *) NULL ? 
arg2n : ""); arg1 = arg1n; arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type do { /* break to exit code */ /* No-op options (ignore these) */ if (LocaleCompare("noop",option+1) == 0) /* zero argument */ break; if (LocaleCompare("sans",option+1) == 0) /* one argument */ break; if (LocaleCompare("sans0",option+1) == 0) /* zero argument */ break; if (LocaleCompare("sans1",option+1) == 0) /* one argument */ break; if (LocaleCompare("sans2",option+1) == 0) /* two arguments */ break; /* Image Reading */ if ( ( LocaleCompare("read",option+1) == 0 ) || ( LocaleCompare("--",option) == 0 ) ) { /* Do Glob filename Expansion for 'arg1' then read all images. * * Expansion handles '@', '~', '*', and '?' meta-characters while ignoring * (but attaching to the filenames in the generated argument list) any * [...] read modifiers that may be present. * * For example: It will expand '*.gif[20x20]' into a list such as * 'abc.gif[20x20]', 'foobar.gif[20x20]', 'xyzzy.gif[20x20]' * * NOTE: In IMv6 this was done globally across all images. This * meant you could include IM options in '@filename' lists, but you * could not include comments. Doing it only for image read makes * it far more secure. * * Note: arguments do not have percent escapes expanded for security * reasons. */ int argc; char **argv; ssize_t i; argc = 1; argv = (char **) &arg1; /* Expand 'glob' expressions in the given filename. Expansion handles any 'coder:' prefix, or read modifiers attached to the filename, including them in the resulting expanded list. 
*/ if (ExpandFilenames(&argc,&argv) == MagickFalse) CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed", option,GetExceptionMessage(errno)); /* loop over expanded filename list, and read then all in */ for (i=0; i < (ssize_t) argc; i++) { Image * new_images; if (_image_info->ping != MagickFalse) new_images=PingImages(_image_info,argv[i],_exception); else new_images=ReadImages(_image_info,argv[i],_exception); AppendImageToList(&_images, new_images); argv[i]=DestroyString(argv[i]); } argv=(char **) RelinquishMagickMemory(argv); break; } /* Image Writing Note: Writing a empty image list is valid in specific cases */ if (LocaleCompare("write",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ char key[MagickPathExtent]; Image *write_images; ImageInfo *write_info; /* Need images, unless a "null:" output coder is used */ if ( _images == (Image *) NULL ) { if ( LocaleCompare(arg1,"null:") == 0 ) break; CLIWandExceptArgBreak(OptionError,"NoImagesForWrite",option,arg1); } (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",arg1); (void) DeleteImageRegistry(key); write_images=_images; if (IfPlusOp) write_images=CloneImageList(_images,_exception); write_info=CloneImageInfo(_image_info); (void) WriteImages(write_info,write_images,arg1,_exception); write_info=DestroyImageInfo(write_info); if (IfPlusOp) write_images=DestroyImageList(write_images); break; } /* Parenthesis and Brace operations */ if (LocaleCompare("(",option) == 0) { /* stack 'push' images */ Stack *node; size_t size; size=0; node=cli_wand->image_list_stack; for ( ; node != (Stack *) NULL; node=node->next) size++; if ( size >= MAX_STACK_DEPTH ) CLIWandExceptionBreak(OptionError,"ParenthesisNestedTooDeeply",option); node=(Stack *) AcquireMagickMemory(sizeof(*node)); if (node == (Stack *) NULL) CLIWandExceptionBreak(ResourceLimitFatalError, "MemoryAllocationFailed",option); node->data = (void *)cli_wand->wand.images; node->next = cli_wand->image_list_stack; cli_wand->image_list_stack = node; cli_wand->wand.images = NewImageList(); /* handle respect-parenthesis */ if (IsStringTrue(GetImageOption(cli_wand->wand.image_info, "respect-parenthesis")) != MagickFalse) option="{"; /* fall-thru so as to push image settings too */ else break; /* fall thru to operation */ } if (LocaleCompare("{",option) == 0) { /* stack 'push' of image_info settings */ Stack *node; size_t size; size=0; node=cli_wand->image_info_stack; for ( ; node != (Stack *) NULL; node=node->next) size++; if ( size >= MAX_STACK_DEPTH ) CLIWandExceptionBreak(OptionError,"CurlyBracesNestedTooDeeply",option); node=(Stack *) AcquireMagickMemory(sizeof(*node)); if (node == (Stack *) NULL) CLIWandExceptionBreak(ResourceLimitFatalError, "MemoryAllocationFailed",option); node->data = (void *)cli_wand->wand.image_info; node->next = cli_wand->image_info_stack; cli_wand->image_info_stack = node; cli_wand->wand.image_info = CloneImageInfo(cli_wand->wand.image_info); if (cli_wand->wand.image_info == (ImageInfo *) NULL) { CLIWandException(ResourceLimitFatalError,"MemoryAllocationFailed", option); cli_wand->wand.image_info = (ImageInfo *)node->data; node = (Stack *)RelinquishMagickMemory(node); break; } break; } if (LocaleCompare(")",option) == 0) { /* pop images from stack */ Stack *node; node = (Stack *)cli_wand->image_list_stack; if ( node == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnbalancedParenthesis",option); cli_wand->image_list_stack = node->next; AppendImageToList((Image **)&node->data,cli_wand->wand.images); cli_wand->wand.images= 
(Image *)node->data; node = (Stack *)RelinquishMagickMemory(node); /* handle respect-parenthesis - of the previous 'pushed' settings */ node = cli_wand->image_info_stack; if ( node != (Stack *) NULL) { if (IsStringTrue(GetImageOption( cli_wand->wand.image_info,"respect-parenthesis")) != MagickFalse) option="}"; /* fall-thru so as to pop image settings too */ else break; } else break; /* fall thru to next if */ } if (LocaleCompare("}",option) == 0) { /* pop image_info settings from stack */ Stack *node; node = (Stack *)cli_wand->image_info_stack; if ( node == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnbalancedCurlyBraces",option); cli_wand->image_info_stack = node->next; (void) DestroyImageInfo(cli_wand->wand.image_info); cli_wand->wand.image_info = (ImageInfo *)node->data; node = (Stack *)RelinquishMagickMemory(node); GetDrawInfo(cli_wand->wand.image_info, cli_wand->draw_info); cli_wand->quantize_info=DestroyQuantizeInfo(cli_wand->quantize_info); cli_wand->quantize_info=AcquireQuantizeInfo(cli_wand->wand.image_info); break; } if (LocaleCompare("print",option+1) == 0) { (void) FormatLocaleFile(stdout,"%s",arg1); break; } if (LocaleCompare("set",option+1) == 0) { /* Settings are applied to each image in memory in turn (if any). While a option: only need to be applied once globally. NOTE: rguments have not been automatically percent expaneded */ /* escape the 'key' once only, using first image. */ arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); if (LocaleNCompare(arg1,"registry:",9) == 0) { if (IfPlusOp) { (void) DeleteImageRegistry(arg1+9); arg1=DestroyString((char *)arg1); break; } arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { arg1=DestroyString((char *)arg1); CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); } (void) SetImageRegistry(StringRegistryType,arg1+9,arg2,_exception); arg1=DestroyString((char *)arg1); arg2=DestroyString((char *)arg2); break; } if (LocaleNCompare(arg1,"option:",7) == 0) { /* delete equivelent artifact from all images (if any) */ if (_images != (Image *) NULL) { MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) DeleteImageArtifact(_images,arg1+7); MagickResetIterator(&cli_wand->wand); } /* now set/delete the global option as needed */ /* FUTURE: make escapes in a global 'option:' delayed */ arg2=(char *) NULL; if (IfNormalOp) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) CLIWandExceptionBreak(OptionWarning, "InterpretPropertyFailure",option); } (void) SetImageOption(_image_info,arg1+7,arg2); arg1=DestroyString((char *)arg1); arg2=DestroyString((char *)arg2); break; } /* Set Artifacts/Properties/Attributes all images (required) */ if ( _images == (Image *) NULL ) CLIWandExceptArgBreak(OptionWarning,"NoImageForProperty",option,arg1); MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) { arg2=(char *) NULL; if (IfNormalOp) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) CLIWandExceptionBreak(OptionWarning, "InterpretPropertyFailure",option); } if (LocaleNCompare(arg1,"artifact:",9) == 0) (void) SetImageArtifact(_images,arg1+9,arg2); else if (LocaleNCompare(arg1,"property:",9) == 0) (void) SetImageProperty(_images,arg1+9,arg2,_exception); else (void) 
SetImageProperty(_images,arg1,arg2,_exception); arg2=DestroyString((char *)arg2); } MagickResetIterator(&cli_wand->wand); arg1=DestroyString((char *)arg1); break; } if (LocaleCompare("clone",option+1) == 0) { Image *new_images; if (*option == '+') arg1=AcquireString("-1"); if (IsSceneGeometry(arg1,MagickFalse) == MagickFalse) CLIWandExceptionBreak(OptionError,"InvalidArgument",option); if ( cli_wand->image_list_stack == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option); new_images = (Image *)cli_wand->image_list_stack->data; if (new_images == (Image *) NULL) CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option); new_images=CloneImages(new_images,arg1,_exception); if (new_images == (Image *) NULL) CLIWandExceptionBreak(OptionError,"NoSuchImage",option); AppendImageToList(&_images,new_images); break; } /* Informational Operations. Note that these do not require either a cli-wand or images! Though currently a cli-wand much be provided regardless. */ if (LocaleCompare("version",option+1) == 0) { ListMagickVersion(stdout); break; } if (LocaleCompare("list",option+1) == 0) { /* FUTURE: This 'switch' should really be part of MagickCore */ ssize_t list; list=ParseCommandOption(MagickListOptions,MagickFalse,arg1); if ( list < 0 ) { CLIWandExceptionArg(OptionError,"UnrecognizedListType",option,arg1); break; } switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,_exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,_exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,_exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,_exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,_exception); break; } case MagickFormatOptions: (void) ListMagickInfo((FILE *) NULL,_exception); break; case MagickLocaleOptions: (void) ListLocaleInfo((FILE *) NULL,_exception); break; case MagickLogOptions: (void) ListLogInfo((FILE *) NULL,_exception); break; case MagickMagicOptions: (void) ListMagicInfo((FILE *) NULL,_exception); break; case MagickMimeOptions: (void) ListMimeInfo((FILE *) NULL,_exception); break; case MagickModuleOptions: (void) ListModuleInfo((FILE *) NULL,_exception); break; case MagickPolicyOptions: (void) ListPolicyInfo((FILE *) NULL,_exception); break; case MagickResourceOptions: (void) ListMagickResourceInfo((FILE *) NULL,_exception); break; case MagickThresholdOptions: (void) ListThresholdMaps((FILE *) NULL,_exception); break; default: (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, _exception); break; } break; } CLIWandException(OptionError,"UnrecognizedOption",option); DisableMSCWarning(4127) } while (0); /* break to exit code. */ RestoreMSCWarning /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); #undef _image_info #undef _images #undef _exception #undef IfNormalOp #undef IfPlusOp } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLIOption() Processes the given option using the given CLI Magick Wand. % The option arguments can be variable in number, though at this time no more % that two is actually used by any option (this may change). Excess options % are simply ignored. 
% % If the cli_wand->command pointer is non-null, then it is assumed that the % option has already been search for up from the CommandOptions[] table in % "MagickCore/options.c" using GetCommandOptionInfo(). If not set this % routine will do the lookup instead. The pointer is reset afterward. % % This action allows the caller to lookup and pre-handle any 'special' % options, (such as implicit reads) before calling this general option % handler to deal with 'standard' command line options. % % The format of the CLIOption method is: % % void CLIOption(MagickCLI *cli_wand,const char *option, ...) % % A description of each parameter follows: % % o cli_wand: the main CLI Wand to use. % % o option: The special option (with any switch char) to process % % o args: any required arguments for an option (variable number) % % Example Usage... % % CLIoption(cli_wand,"-read","rose:"); % CLIoption(cli_wand,"-virtual-pixel","transparent"); % CLIoption(cli_wand,"-distort","SRT:","30"); % CLIoption(cli_wand,"-write","rotated_rose.png"); % */ WandExport void CLIOption(MagickCLI *cli_wand,const char *option,...) { const char /* extracted option args from args */ *arg1, *arg2; CommandOptionFlags option_type; assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); do { /* Break Code Block for error handling */ /* get information about option */ if ( cli_wand->command == (const OptionInfo *) NULL ) cli_wand->command = GetCommandOptionInfo(option); #if 0 (void) FormatLocaleFile(stderr, "CLIOption \"%s\" matched \"%s\"\n", option, cli_wand->command->mnemonic ); #endif option_type=(CommandOptionFlags) cli_wand->command->flags; if ( option_type == UndefinedOptionFlag ) CLIWandExceptionReturn(OptionFatalError,"UnrecognizedOption",option); assert( LocaleCompare(cli_wand->command->mnemonic,option) == 0 ); /* deprecated options */ if ( (option_type & DeprecateOptionFlag) != 0 ) CLIWandExceptionBreak(OptionError,"DeprecatedOptionNoCode",option); /* options that this module does not handle */ if ((option_type & (SpecialOptionFlag|GenesisOptionFlag)) != 0 ) CLIWandExceptionBreak(OptionFatalError,"InvalidUseOfOption",option); /* Get argument strings from VarArgs How can you determine if enough arguments was supplied? What happens if not enough arguments were supplied? 
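       In practice the expected argument count is read from
       cli_wand->command->type below, and any argument that is not supplied
       is simply left as NULL.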
*/ { size_t count = (size_t) cli_wand->command->type; va_list operands; va_start(operands,option); arg1=arg2=NULL; if ( count >= 1 ) arg1=(const char *) va_arg(operands, const char *); if ( count >= 2 ) arg2=(const char *) va_arg(operands, const char *); va_end(operands); #if 0 (void) FormatLocaleFile(stderr, "CLIOption: \"%s\" Count: %ld Flags: %04x Args: \"%s\" \"%s\"\n", option,(long) count,option_type,arg1,arg2); #endif } /* Call the appropriate option handler */ /* FUTURE: this is temporary - get 'settings' to handle distribution of settings to images attributes,proprieties,artifacts */ if ( cli_wand->wand.images != (Image *) NULL ) (void) SyncImagesSettings(cli_wand->wand.image_info,cli_wand->wand.images, cli_wand->wand.exception); if ( (option_type & SettingOptionFlags) != 0 ) { CLISettingOptionInfo(cli_wand, option, arg1, arg2); /* FUTURE: Sync Specific Settings into Image Properities (not global) */ } /* Operators that do not need images - read, write, stack, clone */ if ((option_type & NoImageOperatorFlag) != 0) CLINoImageOperator(cli_wand, option, arg1, arg2); /* FUTURE: The not a setting part below is a temporary hack due to * some options being both a Setting and a Simple operator. * Specifically -monitor, -depth, and -colorspace */ if ( cli_wand->wand.images == (Image *) NULL ) if ( ((option_type & (SimpleOperatorFlag|ListOperatorFlag)) != 0 ) && ((option_type & SettingOptionFlags) == 0 )) /* temp hack */ CLIWandExceptionBreak(OptionError,"NoImagesFound",option); /* Operators which loop of individual images, simply */ if ( (option_type & SimpleOperatorFlag) != 0 && cli_wand->wand.images != (Image *) NULL) /* temp hack */ { ExceptionInfo *exception=AcquireExceptionInfo(); (void) CLISimpleOperatorImages(cli_wand, option, arg1, arg2,exception); exception=DestroyExceptionInfo(exception); } /* Operators that work on the image list as a whole */ if ( (option_type & ListOperatorFlag) != 0 ) (void) CLIListOperatorImages(cli_wand, option, arg1, arg2); DisableMSCWarning(4127) } while (0); /* end Break code block */ RestoreMSCWarning cli_wand->command = (const OptionInfo *) NULL; /* prevent re-use later */ }
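/*
  Dispatch summary: after syncing global settings into any images present,
  CLIOption() hands the option to up to four handlers in order:
  CLISettingOptionInfo() for settings, CLINoImageOperator() for read/write/
  stack/clone operations, CLISimpleOperatorImages() for per-image operators,
  and CLIListOperatorImages() for operators acting on the list as a whole.

  An illustrative calling sketch only (it assumes a MagickCLI wand has
  already been acquired, for example with AcquireMagickCLI(); filenames and
  arguments are placeholders):

    CLIOption(cli_wand,"-read","rose:");
    CLIOption(cli_wand,"-sharpen","0x1.0");
    CLIOption(cli_wand,"-write","rose_sharp.png");
*/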
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS % % P P SS % % PPPP SSS % % P SS % % P SSSSS % % % % % % Read/Write Postscript Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/delegate-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/profile.h" #include "MagickCore/resource_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* Forward declarations. */ static MagickBooleanType WritePSImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v o k e P o s t s r i p t D e l e g a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InvokePostscriptDelegate() executes the Postscript interpreter with the % specified command. % % The format of the InvokePostscriptDelegate method is: % % MagickBooleanType InvokePostscriptDelegate( % const MagickBooleanType verbose,const char *command, % ExceptionInfo *exception) % % A description of each parameter follows: % % o verbose: A value other than zero displays the command prior to % executing it. % % o command: the address of a character string containing the command to % execute. % % o exception: return any errors or warnings in this structure. 
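%
%    o message: buffer of MagickPathExtent characters that receives any
%      messages reported by the Ghostscript delegate.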
% */ #if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT) static int MagickDLLCall PostscriptDelegateMessage(void *handle, const char *message,int length) { char **messages; ssize_t offset; offset=0; messages=(char **) handle; if (*messages == (char *) NULL) *messages=(char *) AcquireQuantumMemory((size_t) length+1,sizeof(char *)); else { offset=(ssize_t) strlen(*messages); *messages=(char *) ResizeQuantumMemory(*messages,(size_t) offset+length+1, sizeof(char *)); } if (*messages == (char *) NULL) return(0); (void) memcpy(*messages+offset,message,(size_t) length); (*messages)[length+offset] ='\0'; return(length); } #endif static MagickBooleanType InvokePostscriptDelegate( const MagickBooleanType verbose,const char *command,char *message, ExceptionInfo *exception) { int status; #if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT) #define SetArgsStart(command,args_start) \ if (args_start == (const char *) NULL) \ { \ if (*command != '"') \ args_start=strchr(command,' '); \ else \ { \ args_start=strchr(command+1,'"'); \ if (args_start != (const char *) NULL) \ args_start++; \ } \ } #define ExecuteGhostscriptCommand(command,status) \ { \ status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \ exception); \ if (status == 0) \ return(MagickTrue); \ if (status < 0) \ return(MagickFalse); \ (void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \ "FailedToExecuteCommand","`%s' (%d)",command,status); \ return(MagickFalse); \ } char **argv, *errors; const char *args_start = (const char *) NULL; const GhostInfo *ghost_info; gs_main_instance *interpreter; gsapi_revision_t revision; int argc, code; register ssize_t i; #if defined(MAGICKCORE_WINDOWS_SUPPORT) ghost_info=NTGhostscriptDLLVectors(); #else GhostInfo ghost_info_struct; ghost_info=(&ghost_info_struct); (void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct)); ghost_info_struct.delete_instance=(void (*)(gs_main_instance *)) gsapi_delete_instance; ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit; ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *)) gsapi_new_instance; ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **)) gsapi_init_with_args; ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int, int *)) gsapi_run_string; ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *, int),int (*)(void *,const char *,int),int (*)(void *, const char *, int))) gsapi_set_stdio; ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision; #endif if (ghost_info == (GhostInfo *) NULL) ExecuteGhostscriptCommand(command,status); if ((ghost_info->revision)(&revision,(int) sizeof(revision)) != 0) revision.revision=0; if (verbose != MagickFalse) { (void) fprintf(stdout,"[ghostscript library %.2f]",(double) revision.revision/100.0); SetArgsStart(command,args_start); (void) fputs(args_start,stdout); } interpreter=(gs_main_instance *) NULL; errors=(char *) NULL; status=(ghost_info->new_instance)(&interpreter,(void *) &errors); if (status < 0) ExecuteGhostscriptCommand(command,status); code=0; argv=StringToArgv(command,&argc); if (argv == (char **) NULL) { (ghost_info->delete_instance)(interpreter); return(MagickFalse); } (void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *, char *,int)) NULL,PostscriptDelegateMessage,PostscriptDelegateMessage); status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1); if (status == 0) 
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n", 0,&code); (ghost_info->exit)(interpreter); (ghost_info->delete_instance)(interpreter); for (i=0; i < (ssize_t) argc; i++) argv[i]=DestroyString(argv[i]); argv=(char **) RelinquishMagickMemory(argv); if (status != 0) { SetArgsStart(command,args_start); if (status == -101) /* quit */ (void) FormatLocaleString(message,MagickPathExtent, "[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0, args_start,errors); else { (void) ThrowMagickException(exception,GetMagickModule(), DelegateError,"PostscriptDelegateFailed", "`[ghostscript library %.2f]%s': %s",(double) revision.revision/ 100.0,args_start,errors); if (errors != (char *) NULL) errors=DestroyString(errors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Ghostscript returns status %d, exit code %d",status,code); return(MagickFalse); } } if (errors != (char *) NULL) errors=DestroyString(errors); return(MagickTrue); #else status=ExternalDelegateCommand(MagickFalse,verbose,command,message,exception); return(status == 0 ? MagickTrue : MagickFalse); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPS() returns MagickTrue if the image format type, identified by the % magick string, is PS. % % The format of the IsPS method is: % % MagickBooleanType IsPS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPS(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (memcmp(magick,"%!",2) == 0) return(MagickTrue); if (memcmp(magick,"\004%!",3) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSImage() reads a Postscript image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer % to the new image. % % The format of the ReadPSImage method is: % % Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType IsPostscriptRendered(const char *path) { MagickBooleanType status; struct stat attributes; if ((path == (const char *) NULL) || (*path == '\0')) return(MagickFalse); status=GetPathAttributes(path,&attributes); if ((status != MagickFalse) && S_ISREG(attributes.st_mode) && (attributes.st_size > 0)) return(MagickTrue); return(MagickFalse); } static inline int ProfileInteger(Image *image,short int *hex_digits) { int c, l, value; register ssize_t i; l=0; value=0; for (i=0; i < 2; ) { c=ReadBlobByte(image); if ((c == EOF) || ((c == '%') && (l == '%'))) { value=(-1); break; } l=c; c&=0xff; if (isxdigit(c) == MagickFalse) continue; value=(int) ((size_t) value << 4)+hex_digits[c]; i++; } return(value); } static Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define BoundingBox "BoundingBox:" #define BeginDocument "BeginDocument:" #define BeginXMPPacket "<?xpacket begin=" #define EndXMPPacket "<?xpacket end=" #define ICCProfile "BeginICCProfile:" #define CMYKCustomColor "CMYKCustomColor:" #define CMYKProcessColor "CMYKProcessColor:" #define DocumentMedia "DocumentMedia:" #define DocumentCustomColors "DocumentCustomColors:" #define DocumentProcessColors "DocumentProcessColors:" #define EndDocument "EndDocument:" #define HiResBoundingBox "HiResBoundingBox:" #define ImageData "ImageData:" #define PageBoundingBox "PageBoundingBox:" #define LanguageLevel "LanguageLevel:" #define PageMedia "PageMedia:" #define Pages "Pages:" #define PhotoshopProfile "BeginPhotoshop:" #define PostscriptLevel "!PS-" #define RenderPostscriptText " Rendering Postscript... " #define SpotColor "+ " char command[MagickPathExtent], *density, filename[MagickPathExtent], geometry[MagickPathExtent], input_filename[MagickPathExtent], message[MagickPathExtent], *options, postscript_filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; GeometryInfo geometry_info; Image *image, *next, *postscript_image; ImageInfo *read_info; int c, file; MagickBooleanType cmyk, fitPage, skip, status; MagickStatusType flags; PointInfo delta, resolution; RectangleInfo page; register char *p; register ssize_t i; SegmentInfo bounds, hires_bounds; short int hex_digits[256]; size_t length; ssize_t count, priority; StringInfo *profile; unsigned long columns, extent, language_level, pages, rows, scene, spotcolor; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } status=AcquireUniqueSymbolicLink(image_info->filename,input_filename); if (status == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Initialize hex values. 
*/ (void) memset(hex_digits,0,sizeof(hex_digits)); hex_digits[(int) '0']=0; hex_digits[(int) '1']=1; hex_digits[(int) '2']=2; hex_digits[(int) '3']=3; hex_digits[(int) '4']=4; hex_digits[(int) '5']=5; hex_digits[(int) '6']=6; hex_digits[(int) '7']=7; hex_digits[(int) '8']=8; hex_digits[(int) '9']=9; hex_digits[(int) 'a']=10; hex_digits[(int) 'b']=11; hex_digits[(int) 'c']=12; hex_digits[(int) 'd']=13; hex_digits[(int) 'e']=14; hex_digits[(int) 'f']=15; hex_digits[(int) 'A']=10; hex_digits[(int) 'B']=11; hex_digits[(int) 'C']=12; hex_digits[(int) 'D']=13; hex_digits[(int) 'E']=14; hex_digits[(int) 'F']=15; /* Set the page density. */ delta.x=DefaultResolution; delta.y=DefaultResolution; if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0)) { flags=ParseGeometry(PSDensityGeometry,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } if (image_info->density != (char *) NULL) { flags=ParseGeometry(image_info->density,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } (void) ParseAbsoluteGeometry(PSPageGeometry,&page); if (image_info->page != (char *) NULL) (void) ParseAbsoluteGeometry(image_info->page,&page); resolution=image->resolution; page.width=(size_t) ceil((double) (page.width*resolution.x/delta.x)-0.5); page.height=(size_t) ceil((double) (page.height*resolution.y/delta.y)-0.5); /* Determine page geometry from the Postscript bounding box. */ (void) memset(&bounds,0,sizeof(bounds)); (void) memset(command,0,sizeof(command)); cmyk=image_info->colorspace == CMYKColorspace ? MagickTrue : MagickFalse; (void) memset(&hires_bounds,0,sizeof(hires_bounds)); columns=0; rows=0; priority=0; rows=0; extent=0; spotcolor=0; language_level=1; pages=(~0UL); skip=MagickFalse; p=command; for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { /* Note document structuring comments. */ *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Skip %%BeginDocument thru %%EndDocument. */ if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0) skip=MagickTrue; if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0) skip=MagickFalse; if (skip != MagickFalse) continue; if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0) { (void) SetImageProperty(image,"ps:Level",command+4,exception); if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse) pages=1; } if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0) (void) sscanf(command,LanguageLevel " %lu",&language_level); if (LocaleNCompare(Pages,command,strlen(Pages)) == 0) (void) sscanf(command,Pages " %lu",&pages); if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0) (void) sscanf(command,ImageData " %lu %lu",&columns,&rows); /* Is this a CMYK document? 
*/ length=strlen(DocumentProcessColors); if (LocaleNCompare(DocumentProcessColors,command,length) == 0) { if ((GlobExpression(command,"*Cyan*",MagickTrue) != MagickFalse) || (GlobExpression(command,"*Magenta*",MagickTrue) != MagickFalse) || (GlobExpression(command,"*Yellow*",MagickTrue) != MagickFalse)) cmyk=MagickTrue; } if (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) cmyk=MagickTrue; if (LocaleNCompare(CMYKProcessColor,command,strlen(CMYKProcessColor)) == 0) cmyk=MagickTrue; length=strlen(DocumentCustomColors); if ((LocaleNCompare(DocumentCustomColors,command,length) == 0) || (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) || (LocaleNCompare(SpotColor,command,strlen(SpotColor)) == 0)) { char property[MagickPathExtent], *value; register char *q; /* Note spot names. */ (void) FormatLocaleString(property,MagickPathExtent, "ps:SpotColor-%.20g",(double) (spotcolor++)); for (q=command; *q != '\0'; q++) if (isspace((int) (unsigned char) *q) != 0) break; value=ConstantString(q); (void) SubstituteString(&value,"(",""); (void) SubstituteString(&value,")",""); (void) StripString(value); if (*value != '\0') (void) SetImageProperty(image,property,value,exception); value=DestroyString(value); continue; } if (image_info->page != (char *) NULL) continue; /* Note region defined by bounding box. */ count=0; i=0; if (LocaleNCompare(BoundingBox,command,strlen(BoundingBox)) == 0) { count=(ssize_t) sscanf(command,BoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=2; } if (LocaleNCompare(DocumentMedia,command,strlen(DocumentMedia)) == 0) { count=(ssize_t) sscanf(command,DocumentMedia " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if (LocaleNCompare(HiResBoundingBox,command,strlen(HiResBoundingBox)) == 0) { count=(ssize_t) sscanf(command,HiResBoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=3; } if (LocaleNCompare(PageBoundingBox,command,strlen(PageBoundingBox)) == 0) { count=(ssize_t) sscanf(command,PageBoundingBox " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if (LocaleNCompare(PageMedia,command,strlen(PageMedia)) == 0) { count=(ssize_t) sscanf(command,PageMedia " %lf %lf %lf %lf", &bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2); i=1; } if ((count != 4) || (i < (ssize_t) priority)) continue; if ((fabs(bounds.x2-bounds.x1) <= fabs(hires_bounds.x2-hires_bounds.x1)) || (fabs(bounds.y2-bounds.y1) <= fabs(hires_bounds.y2-hires_bounds.y1))) if (i == (ssize_t) priority) continue; hires_bounds=bounds; priority=i; } if ((fabs(hires_bounds.x2-hires_bounds.x1) >= MagickEpsilon) && (fabs(hires_bounds.y2-hires_bounds.y1) >= MagickEpsilon)) { /* Set Postscript render geometry. 
*/ (void) FormatLocaleString(geometry,MagickPathExtent,"%gx%g%+.15g%+.15g", hires_bounds.x2-hires_bounds.x1,hires_bounds.y2-hires_bounds.y1, hires_bounds.x1,hires_bounds.y1); (void) SetImageProperty(image,"ps:HiResBoundingBox",geometry,exception); page.width=(size_t) ceil((double) ((hires_bounds.x2-hires_bounds.x1)* resolution.x/delta.x)-0.5); page.height=(size_t) ceil((double) ((hires_bounds.y2-hires_bounds.y1)* resolution.y/delta.y)-0.5); } fitPage=MagickFalse; option=GetImageOption(image_info,"eps:fit-page"); if (option != (char *) NULL) { char *page_geometry; page_geometry=GetPageGeometry(option); flags=ParseMetaGeometry(page_geometry,&page.x,&page.y,&page.width, &page.height); if (flags == NoValue) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidGeometry","`%s'",option); page_geometry=DestroyString(page_geometry); image=DestroyImage(image); return((Image *) NULL); } page.width=(size_t) ceil((double) (page.width*image->resolution.x/delta.x) -0.5); page.height=(size_t) ceil((double) (page.height*image->resolution.y/ delta.y) -0.5); page_geometry=DestroyString(page_geometry); fitPage=MagickTrue; } if (IssRGBCompatibleColorspace(image_info->colorspace) != MagickFalse) cmyk=MagickFalse; /* Create Ghostscript control file. */ file=AcquireUniqueFileResource(postscript_filename); if (file == -1) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", image_info->filename); image=DestroyImageList(image); return((Image *) NULL); } (void) CopyMagickString(command,"/setpagedevice {pop} bind 1 index where {" "dup wcheck {3 1 roll put} {pop def} ifelse} {def} ifelse\n" "<</UseCIEColor true>>setpagedevice\n",MagickPathExtent); count=write(file,command,(unsigned int) strlen(command)); if (image_info->page == (char *) NULL) { char translate_geometry[MagickPathExtent]; (void) FormatLocaleString(translate_geometry,MagickPathExtent, "%g %g translate\n",-bounds.x1,-bounds.y1); count=write(file,translate_geometry,(unsigned int) strlen(translate_geometry)); } file=close(file)-1; /* Render Postscript with the Ghostscript delegate. 
*/ if (image_info->monochrome != MagickFalse) delegate_info=GetDelegateInfo("ps:mono",(char *) NULL,exception); else if (cmyk != MagickFalse) delegate_info=GetDelegateInfo("ps:cmyk",(char *) NULL,exception); else delegate_info=GetDelegateInfo("ps:alpha",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) RelinquishUniqueFileResource(postscript_filename); image=DestroyImageList(image); return((Image *) NULL); } density=AcquireString(""); options=AcquireString(""); (void) FormatLocaleString(density,MagickPathExtent,"%gx%g",resolution.x, resolution.y); (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double) page.width,(double) page.height); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; if (read_info->number_scenes != 0) { char pages[MagickPathExtent]; (void) FormatLocaleString(pages,MagickPathExtent,"-dFirstPage=%.20g " "-dLastPage=%.20g ",(double) read_info->scene+1,(double) (read_info->scene+read_info->number_scenes)); (void) ConcatenateMagickString(options,pages,MagickPathExtent); read_info->number_scenes=0; if (read_info->scenes != (char *) NULL) *read_info->scenes='\0'; } if (*image_info->magick == 'E') { option=GetImageOption(image_info,"eps:use-cropbox"); if ((option == (const char *) NULL) || (IsStringTrue(option) != MagickFalse)) (void) ConcatenateMagickString(options,"-dEPSCrop ",MagickPathExtent); if (fitPage != MagickFalse) (void) ConcatenateMagickString(options,"-dEPSFitPage ", MagickPathExtent); } (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) AcquireUniqueFilename(filename); (void) RelinquishUniqueFileResource(filename); (void) ConcatenateMagickString(filename,"%d",MagickPathExtent); (void) FormatLocaleString(command,MagickPathExtent, GetDelegateCommands(delegate_info), read_info->antialias != MagickFalse ? 4 : 1, read_info->antialias != MagickFalse ? 
4 : 1,density,options,filename, postscript_filename,input_filename); options=DestroyString(options); density=DestroyString(density); *message='\0'; status=InvokePostscriptDelegate(read_info->verbose,command,message,exception); (void) InterpretImageFilename(image_info,image,filename,1, read_info->filename,exception); if ((status == MagickFalse) || (IsPostscriptRendered(read_info->filename) == MagickFalse)) { (void) ConcatenateMagickString(command," -c showpage",MagickPathExtent); status=InvokePostscriptDelegate(read_info->verbose,command,message, exception); } (void) RelinquishUniqueFileResource(postscript_filename); (void) RelinquishUniqueFileResource(input_filename); postscript_image=(Image *) NULL; if (status == MagickFalse) for (i=1; ; i++) { (void) InterpretImageFilename(image_info,image,filename,(int) i, read_info->filename,exception); if (IsPostscriptRendered(read_info->filename) == MagickFalse) break; (void) RelinquishUniqueFileResource(read_info->filename); } else for (i=1; ; i++) { (void) InterpretImageFilename(image_info,image,filename,(int) i, read_info->filename,exception); if (IsPostscriptRendered(read_info->filename) == MagickFalse) break; read_info->blob=NULL; read_info->length=0; next=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(read_info->filename); if (next == (Image *) NULL) break; AppendImageToList(&postscript_image,next); } (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); if (postscript_image == (Image *) NULL) { if (*message != '\0') (void) ThrowMagickException(exception,GetMagickModule(), DelegateError,"PostscriptDelegateFailed","`%s'",message); image=DestroyImageList(image); return((Image *) NULL); } if (LocaleCompare(postscript_image->magick,"BMP") == 0) { Image *cmyk_image; cmyk_image=ConsolidateCMYKImages(postscript_image,exception); if (cmyk_image != (Image *) NULL) { postscript_image=DestroyImageList(postscript_image); postscript_image=cmyk_image; } } (void) SeekBlob(image,0,SEEK_SET); for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image)) { /* Note document structuring comments. */ *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; /* Skip %%BeginDocument thru %%EndDocument. */ if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0) skip=MagickTrue; if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0) skip=MagickFalse; if (skip != MagickFalse) continue; if (LocaleNCompare(ICCProfile,command,strlen(ICCProfile)) == 0) { unsigned char *datum; /* Read ICC profile. */ profile=AcquireStringInfo(MagickPathExtent); datum=GetStringInfoDatum(profile); for (i=0; (c=ProfileInteger(image,hex_digits)) != EOF; i++) { if (i >= (ssize_t) GetStringInfoLength(profile)) { SetStringInfoLength(profile,(size_t) i << 1); datum=GetStringInfoDatum(profile); } datum[i]=(unsigned char) c; } SetStringInfoLength(profile,(size_t) i+1); (void) SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); continue; } if (LocaleNCompare(PhotoshopProfile,command,strlen(PhotoshopProfile)) == 0) { unsigned char *q; /* Read Photoshop profile. 
*/ count=(ssize_t) sscanf(command,PhotoshopProfile " %lu",&extent); if (count != 1) continue; length=extent; if ((MagickSizeType) length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); profile=BlobToStringInfo((const void *) NULL,length); if (profile != (StringInfo *) NULL) { q=GetStringInfoDatum(profile); for (i=0; i < (ssize_t) length; i++) *q++=(unsigned char) ProfileInteger(image,hex_digits); (void) SetImageProfile(image,"8bim",profile,exception); profile=DestroyStringInfo(profile); } continue; } if (LocaleNCompare(BeginXMPPacket,command,strlen(BeginXMPPacket)) == 0) { /* Read XMP profile. */ p=command; profile=StringToStringInfo(command); for (i=(ssize_t) GetStringInfoLength(profile)-1; c != EOF; i++) { SetStringInfoLength(profile,(size_t) (i+1)); c=ReadBlobByte(image); GetStringInfoDatum(profile)[i]=(unsigned char) c; *p++=(char) c; if ((strchr("\n\r%",c) == (char *) NULL) && ((size_t) (p-command) < (MagickPathExtent-1))) continue; *p='\0'; p=command; if (LocaleNCompare(EndXMPPacket,command,strlen(EndXMPPacket)) == 0) break; } SetStringInfoLength(profile,(size_t) i); (void) SetImageProfile(image,"xmp",profile,exception); profile=DestroyStringInfo(profile); continue; } } (void) CloseBlob(image); if (image_info->number_scenes != 0) { Image *clone_image; /* Add place holder images to meet the subimage specification requirement. */ for (i=0; i < (ssize_t) image_info->scene; i++) { clone_image=CloneImage(postscript_image,1,1,MagickTrue,exception); if (clone_image != (Image *) NULL) PrependImageToList(&postscript_image,clone_image); } } do { (void) CopyMagickString(postscript_image->filename,filename, MagickPathExtent); (void) CopyMagickString(postscript_image->magick,image->magick, MagickPathExtent); if (columns != 0) postscript_image->magick_columns=columns; if (rows != 0) postscript_image->magick_rows=rows; postscript_image->page=page; (void) CloneImageProfiles(postscript_image,image); (void) CloneImageProperties(postscript_image,image); next=SyncNextImageInList(postscript_image); if (next != (Image *) NULL) postscript_image=next; } while (next != (Image *) NULL); image=DestroyImageList(image); scene=0; for (next=GetFirstImageInList(postscript_image); next != (Image *) NULL; ) { next->scene=scene++; next=GetNextImageInList(next); } return(GetFirstImageInList(postscript_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSImage() adds properties for the PS image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPSImage method is: % % size_t RegisterPSImage(void) % */ ModuleExport size_t RegisterPSImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PS","EPI", "Encapsulated PostScript Interchange format"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPS","Encapsulated PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPSF","Encapsulated PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","EPSI", "Encapsulated PostScript Interchange format"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderAdjoinFlag; entry->flags^=CoderBlobSupportFlag; entry->mime_type=ConstantString("application/postscript"); (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PS","PS","PostScript"); entry->decoder=(DecodeImageHandler *) ReadPSImage; entry->encoder=(EncodeImageHandler *) WritePSImage; entry->magick=(IsImageFormatHandler *) IsPS; entry->mime_type=ConstantString("application/postscript"); entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags^=CoderBlobSupportFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSImage() removes format registrations made by the % PS module from the list of supported formats. % % The format of the UnregisterPSImage method is: % % UnregisterPSImage(void) % */ ModuleExport void UnregisterPSImage(void) { (void) UnregisterMagickInfo("EPI"); (void) UnregisterMagickInfo("EPS"); (void) UnregisterMagickInfo("EPSF"); (void) UnregisterMagickInfo("EPSI"); (void) UnregisterMagickInfo("PS"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSImage translates an image to encapsulated Postscript % Level I for printing. If the supplied geometry is null, the image is % centered on the Postscript page. Otherwise, the image is positioned as % specified by the geometry. 
% % The format of the WritePSImage method is: % % MagickBooleanType WritePSImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline unsigned char *PopHexPixel(const char hex_digits[][3], const size_t pixel,unsigned char *pixels) { register const char *hex; hex=hex_digits[pixel]; *pixels++=(unsigned char) (*hex++ & 0xff); *pixels++=(unsigned char) (*hex & 0xff); return(pixels); } static MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { #define WriteRunlengthPacket(image,pixel,length,p) \ { \ if ((image->alpha_trait != UndefinedPixelTrait) && (length != 0) && \ (GetPixelAlpha(image,p) == (Quantum) TransparentAlpha)) \ { \ q=PopHexPixel(hex_digits,0xff,q); \ q=PopHexPixel(hex_digits,0xff,q); \ q=PopHexPixel(hex_digits,0xff,q); \ } \ else \ { \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.red)),q); \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.green)),q); \ q=PopHexPixel(hex_digits,ScaleQuantumToChar(ClampToQuantum(pixel.blue)),q); \ } \ q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); \ } static const char hex_digits[][3] = { "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0A", "0B", "0C", "0D", "0E", "0F", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1A", "1B", "1C", "1D", "1E", "1F", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2A", "2B", "2C", "2D", "2E", "2F", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3A", "3B", "3C", "3D", "3E", "3F", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4A", "4B", "4C", "4D", "4E", "4F", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5A", "5B", "5C", "5D", "5E", "5F", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6A", "6B", "6C", "6D", "6E", "6F", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "7A", "7B", "7C", "7D", "7E", "7F", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8A", "8B", "8C", "8D", "8E", "8F", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9A", "9B", "9C", "9D", "9E", "9F", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF", "B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF", "C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "CA", "CB", "CC", "CD", "CE", "CF", "D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF", "E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF", "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "FA", "FB", "FC", "FD", "FE", "FF" }, PostscriptProlog[] = "%%BeginProlog\n" "%\n" "% Display a color image. 
The image is displayed in color on\n" "% Postscript viewers or printers that support color, otherwise\n" "% it is displayed as grayscale.\n" "%\n" "/DirectClassPacket\n" "{\n" " %\n" " % Get a DirectClass packet.\n" " %\n" " % Parameters:\n" " % red.\n" " % green.\n" " % blue.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile color_packet readhexstring pop pop\n" " compression 0 eq\n" " {\n" " /number_pixels 3 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add 3 mul def\n" " } ifelse\n" " 0 3 number_pixels 1 sub\n" " {\n" " pixels exch color_packet putinterval\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/DirectClassImage\n" "{\n" " %\n" " % Display a DirectClass image.\n" " %\n" " systemdict /colorimage known\n" " {\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { DirectClassPacket } false 3 colorimage\n" " }\n" " {\n" " %\n" " % No colorimage operator; convert to grayscale.\n" " %\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { GrayDirectClassPacket } image\n" " } ifelse\n" "} bind def\n" "\n" "/GrayDirectClassPacket\n" "{\n" " %\n" " % Get a DirectClass packet; convert to grayscale.\n" " %\n" " % Parameters:\n" " % red\n" " % green\n" " % blue\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile color_packet readhexstring pop pop\n" " color_packet 0 get 0.299 mul\n" " color_packet 1 get 0.587 mul add\n" " color_packet 2 get 0.114 mul add\n" " cvi\n" " /gray_packet exch def\n" " compression 0 eq\n" " {\n" " /number_pixels 1 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add def\n" " } ifelse\n" " 0 1 number_pixels 1 sub\n" " {\n" " pixels exch gray_packet put\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/GrayPseudoClassPacket\n" "{\n" " %\n" " % Get a PseudoClass packet; convert to grayscale.\n" " %\n" " % Parameters:\n" " % index: index into the colormap.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile byte readhexstring pop 0 get\n" " /offset exch 3 mul def\n" " /color_packet colormap offset 3 getinterval def\n" " color_packet 0 get 0.299 mul\n" " color_packet 1 get 0.587 mul add\n" " color_packet 2 get 0.114 mul add\n" " cvi\n" " /gray_packet exch def\n" " compression 0 eq\n" " {\n" " /number_pixels 1 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add def\n" " } ifelse\n" " 0 1 number_pixels 1 sub\n" " {\n" " pixels exch gray_packet put\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/PseudoClassPacket\n" "{\n" " %\n" " % Get a PseudoClass packet.\n" " %\n" " % Parameters:\n" " % index: index into the colormap.\n" " % length: number of pixels minus one of this color (optional).\n" " %\n" " currentfile byte readhexstring pop 0 get\n" " /offset exch 3 mul def\n" " /color_packet colormap offset 3 getinterval def\n" " compression 0 eq\n" " {\n" " /number_pixels 3 def\n" " }\n" " {\n" " currentfile byte readhexstring pop 0 get\n" " /number_pixels exch 1 add 3 mul def\n" " } ifelse\n" " 0 3 number_pixels 1 sub\n" " {\n" " pixels exch color_packet putinterval\n" " } for\n" " pixels 0 number_pixels getinterval\n" "} bind def\n" "\n" "/PseudoClassImage\n" "{\n" " %\n" " % Display a PseudoClass image.\n" " %\n" " % Parameters:\n" " % class: 0-PseudoClass or 1-Grayscale.\n" " %\n" " currentfile 
buffer readline pop\n" " token pop /class exch def pop\n" " class 0 gt\n" " {\n" " currentfile buffer readline pop\n" " token pop /depth exch def pop\n" " /grays columns 8 add depth sub depth mul 8 idiv string def\n" " columns rows depth\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { currentfile grays readhexstring pop } image\n" " }\n" " {\n" " %\n" " % Parameters:\n" " % colors: number of colors in the colormap.\n" " % colormap: red, green, blue color packets.\n" " %\n" " currentfile buffer readline pop\n" " token pop /colors exch def pop\n" " /colors colors 3 mul def\n" " /colormap colors string def\n" " currentfile colormap readhexstring pop pop\n" " systemdict /colorimage known\n" " {\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { PseudoClassPacket } false 3 colorimage\n" " }\n" " {\n" " %\n" " % No colorimage operator; convert to grayscale.\n" " %\n" " columns rows 8\n" " [\n" " columns 0 0\n" " rows neg 0 rows\n" " ]\n" " { GrayPseudoClassPacket } image\n" " } ifelse\n" " } ifelse\n" "} bind def\n" "\n" "/DisplayImage\n" "{\n" " %\n" " % Display a DirectClass or PseudoClass image.\n" " %\n" " % Parameters:\n" " % x & y translation.\n" " % x & y scale.\n" " % label pointsize.\n" " % image label.\n" " % image columns & rows.\n" " % class: 0-DirectClass or 1-PseudoClass.\n" " % compression: 0-none or 1-RunlengthEncoded.\n" " % hex color packets.\n" " %\n" " gsave\n" " /buffer 512 string def\n" " /byte 1 string def\n" " /color_packet 3 string def\n" " /pixels 768 string def\n" "\n" " currentfile buffer readline pop\n" " token pop /x exch def\n" " token pop /y exch def pop\n" " x y translate\n" " currentfile buffer readline pop\n" " token pop /x exch def\n" " token pop /y exch def pop\n" " currentfile buffer readline pop\n" " token pop /pointsize exch def pop\n", PostscriptEpilog[] = " x y scale\n" " currentfile buffer readline pop\n" " token pop /columns exch def\n" " token pop /rows exch def pop\n" " currentfile buffer readline pop\n" " token pop /class exch def pop\n" " currentfile buffer readline pop\n" " token pop /compression exch def pop\n" " class 0 gt { PseudoClassImage } { DirectClassImage } ifelse\n" " grestore\n"; char buffer[MagickPathExtent], date[MagickPathExtent], **labels, page_geometry[MagickPathExtent]; CompressionType compression; const char *value; const StringInfo *profile; double pointsize; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType scene; MagickStatusType flags; PixelInfo pixel; PointInfo delta, resolution, scale; Quantum index; RectangleInfo geometry, media_info, page_info; register const Quantum *p; register ssize_t i, x; register unsigned char *q; SegmentInfo bounds; size_t bit, byte, imageListLength, length, page, text_size; ssize_t j, y; time_t timer; unsigned char pixels[2048]; /* Open output image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) memset(&bounds,0,sizeof(bounds)); compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; page=1; scene=0; imageListLength=GetImageListLength(image); do { /* Scale relative to dots-per-inch. */ (void) TransformImageColorspace(image,sRGBColorspace,exception); delta.x=DefaultResolution; delta.y=DefaultResolution; resolution.x=image->resolution.x; resolution.y=image->resolution.y; if ((resolution.x == 0.0) || (resolution.y == 0.0)) { flags=ParseGeometry(PSDensityGeometry,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) resolution.y=resolution.x; } if (image_info->density != (char *) NULL) { flags=ParseGeometry(image_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) resolution.y=resolution.x; } if (image->units == PixelsPerCentimeterResolution) { resolution.x=(double) ((size_t) (100.0*2.54*resolution.x+0.5)/100.0); resolution.y=(double) ((size_t) (100.0*2.54*resolution.y+0.5)/100.0); } SetGeometry(image,&geometry); (void) FormatLocaleString(page_geometry,MagickPathExtent,"%.20gx%.20g", (double) image->columns,(double) image->rows); if (image_info->page != (char *) NULL) (void) CopyMagickString(page_geometry,image_info->page,MagickPathExtent); else if ((image->page.width != 0) && (image->page.height != 0)) (void) FormatLocaleString(page_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double) image->page.height,(double) image->page.x,(double) image->page.y); else if ((image->gravity != UndefinedGravity) && (LocaleCompare(image_info->magick,"PS") == 0)) (void) CopyMagickString(page_geometry,PSPageGeometry, MagickPathExtent); (void) ConcatenateMagickString(page_geometry,">",MagickPathExtent); (void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); scale.x=PerceptibleReciprocal(resolution.x)*geometry.width*delta.x; geometry.width=(size_t) floor(scale.x+0.5); scale.y=PerceptibleReciprocal(resolution.y)*geometry.height*delta.y; geometry.height=(size_t) floor(scale.y+0.5); (void) ParseAbsoluteGeometry(page_geometry,&media_info); (void) ParseGravityGeometry(image,page_geometry,&page_info,exception); if (image->gravity != UndefinedGravity) { geometry.x=(-page_info.x); geometry.y=(ssize_t) (media_info.height+page_info.y-image->rows); } pointsize=12.0; if (image_info->pointsize != 0.0) pointsize=image_info->pointsize; text_size=0; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) text_size=(size_t) (MultilineCensus(value)*pointsize+12); if (page == 1) { /* Output Postscript header. 
*/ if (LocaleCompare(image_info->magick,"PS") == 0) (void) CopyMagickString(buffer,"%!PS-Adobe-3.0\n",MagickPathExtent); else (void) CopyMagickString(buffer,"%!PS-Adobe-3.0 EPSF-3.0\n", MagickPathExtent); (void) WriteBlobString(image,buffer); (void) WriteBlobString(image,"%%Creator: (ImageMagick)\n"); (void) FormatLocaleString(buffer,MagickPathExtent,"%%%%Title: (%s)\n", image->filename); (void) WriteBlobString(image,buffer); timer=GetMagickTime(); (void) FormatMagickTime(timer,MagickPathExtent,date); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%CreationDate: (%s)\n",date); (void) WriteBlobString(image,buffer); bounds.x1=(double) geometry.x; bounds.y1=(double) geometry.y; bounds.x2=(double) geometry.x+scale.x; bounds.y2=(double) geometry.y+(geometry.height+text_size); if ((image_info->adjoin != MagickFalse) && (GetNextImageInList(image) != (Image *) NULL)) (void) CopyMagickString(buffer,"%%%%BoundingBox: (atend)\n", MagickPathExtent); else { (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5), ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1, bounds.y1,bounds.x2,bounds.y2); } (void) WriteBlobString(image,buffer); profile=GetImageProfile(image,"8bim"); if (profile != (StringInfo *) NULL) { /* Embed Photoshop profile. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%%BeginPhotoshop: %.20g",(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) { if ((i % 32) == 0) (void) WriteBlobString(image,"\n% "); (void) FormatLocaleString(buffer,MagickPathExtent,"%02X", (unsigned int) (GetStringInfoDatum(profile)[i] & 0xff)); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"\n%EndPhotoshop\n"); } profile=GetImageProfile(image,"xmp"); DisableMSCWarning(4127) if (0 && (profile != (StringInfo *) NULL)) RestoreMSCWarning { /* Embed XML profile. */ (void) WriteBlobString(image,"\n%begin_xml_code\n"); (void) FormatLocaleString(buffer,MagickPathExtent, "\n%%begin_xml_packet: %.20g\n",(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++) (void) WriteBlobByte(image,GetStringInfoDatum(profile)[i]); (void) WriteBlobString(image,"\n%end_xml_packet\n%end_xml_code\n"); } value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) WriteBlobString(image, "%%DocumentNeededResources: font Times-Roman\n"); (void) WriteBlobString(image,"%%DocumentData: Clean7Bit\n"); (void) WriteBlobString(image,"%%LanguageLevel: 1\n"); if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"%%Pages: 1\n"); else { /* Compute the number of pages. */ (void) WriteBlobString(image,"%%Orientation: Portrait\n"); (void) WriteBlobString(image,"%%PageOrder: Ascend\n"); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%Pages: %.20g\n",image_info->adjoin != MagickFalse ? 
(double) imageListLength : 1.0); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"%%EndComments\n"); (void) WriteBlobString(image,"\n%%BeginDefaults\n"); (void) WriteBlobString(image,"%%EndDefaults\n\n"); if ((LocaleCompare(image_info->magick,"EPI") == 0) || (LocaleCompare(image_info->magick,"EPSI") == 0) || (LocaleCompare(image_info->magick,"EPT") == 0)) { Image *preview_image; Quantum pixel; register ssize_t x; ssize_t y; /* Create preview image. */ preview_image=CloneImage(image,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Dump image as bitmap. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BeginPreview: %.20g %.20g %.20g %.20g\n%% ",(double) preview_image->columns,(double) preview_image->rows,1.0, (double) ((((preview_image->columns+7) >> 3)*preview_image->rows+ 35)/36)); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(preview_image,0,y,preview_image->columns,1, exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) preview_image->columns; x++) { byte<<=1; pixel=ClampToQuantum(GetPixelLuma(preview_image,p)); if (pixel >= (Quantum) (QuantumRange/2)) byte|=0x01; bit++; if (bit == 8) { q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; (void) WriteBlobString(image,"% "); }; bit=0; byte=0; } } if (bit != 0) { byte<<=(8-bit); q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; (void) WriteBlobString(image,"% "); }; }; } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } (void) WriteBlobString(image,"\n%%EndPreview\n"); preview_image=DestroyImage(preview_image); } /* Output Postscript commands. 
*/ (void) WriteBlob(image,sizeof(PostscriptProlog)-1, (const unsigned char *) PostscriptProlog); value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) { (void) WriteBlobString(image, " /Times-Roman findfont pointsize scalefont setfont\n"); for (j=(ssize_t) MultilineCensus(value)-1; j >= 0; j--) { (void) WriteBlobString(image," /label 512 string def\n"); (void) WriteBlobString(image, " currentfile label readline pop\n"); (void) FormatLocaleString(buffer,MagickPathExtent, " 0 y %g add moveto label show pop\n",j*pointsize+12); (void) WriteBlobString(image,buffer); } } (void) WriteBlob(image,sizeof(PostscriptEpilog)-1, (const unsigned char *) PostscriptEpilog); if (LocaleCompare(image_info->magick,"PS") == 0) (void) WriteBlobString(image," showpage\n"); (void) WriteBlobString(image,"} bind def\n"); (void) WriteBlobString(image,"%%EndProlog\n"); } (void) FormatLocaleString(buffer,MagickPathExtent,"%%%%Page: 1 %.20g\n", (double) (page++)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%PageBoundingBox: %.20g %.20g %.20g %.20g\n",(double) geometry.x, (double) geometry.y,geometry.x+(double) geometry.width,geometry.y+(double) (geometry.height+text_size)); (void) WriteBlobString(image,buffer); if ((double) geometry.x < bounds.x1) bounds.x1=(double) geometry.x; if ((double) geometry.y < bounds.y1) bounds.y1=(double) geometry.y; if ((double) (geometry.x+geometry.width-1) > bounds.x2) bounds.x2=(double) geometry.x+geometry.width-1; if ((double) (geometry.y+(geometry.height+text_size)-1) > bounds.y2) bounds.y2=(double) geometry.y+(geometry.height+text_size)-1; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) WriteBlobString(image,"%%%%PageResources: font Times-Roman\n"); if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"userdict begin\n"); (void) WriteBlobString(image,"DisplayImage\n"); /* Output image data. */ (void) FormatLocaleString(buffer,MagickPathExtent,"%.20g %.20g\n%g %g\n%g\n", (double) geometry.x,(double) geometry.y,scale.x,scale.y,pointsize); (void) WriteBlobString(image,buffer); labels=(char **) NULL; value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) labels=StringToList(value); if (labels != (char **) NULL) { for (i=0; labels[i] != (char *) NULL; i++) { (void) FormatLocaleString(buffer,MagickPathExtent,"%s \n", labels[i]); (void) WriteBlobString(image,buffer); labels[i]=DestroyString(labels[i]); } labels=(char **) RelinquishMagickMemory(labels); } (void) memset(&pixel,0,sizeof(pixel)); pixel.alpha=(MagickRealType) TransparentAlpha; index=(Quantum) 0; x=0; if ((image_info->type != TrueColorType) && (SetImageGray(image,exception) != MagickFalse)) { if (SetImageMonochrome(image,exception) == MagickFalse) { Quantum pixel; /* Dump image as grayscale. 
*/ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n1\n1\n1\n8\n",(double) image->columns,(double) image->rows); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(Quantum) ScaleQuantumToChar(ClampToQuantum(GetPixelLuma( image,p))); q=PopHexPixel(hex_digits,(size_t) pixel,q); if ((q-pixels+8) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } } else { ssize_t y; Quantum pixel; /* Dump image as bitmap. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n1\n1\n1\n1\n",(double) image->columns,(double) image->rows); (void) WriteBlobString(image,buffer); q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; pixel=ClampToQuantum(GetPixelLuma(image,p)); if (pixel >= (Quantum) (QuantumRange/2)) byte|=0x01; bit++; if (bit == 8) { q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+2) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; }; bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); q=PopHexPixel(hex_digits,byte,q); if ((q-pixels+2) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } }; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } } } else if ((image->storage_class == DirectClass) || (image->colors > 256) || (image->alpha_trait != UndefinedPixelTrait)) { /* Dump DirectClass image. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n0\n%d\n",(double) image->columns,(double) image->rows, compression == RLECompression ? 1 : 0); (void) WriteBlobString(image,buffer); switch (compression) { case RLECompression: { /* Dump runlength-encoded DirectColor packets. 
*/ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; GetPixelInfoPixel(image,p,&pixel); length=255; for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(image,p) == ClampToQuantum(pixel.red)) && (GetPixelGreen(image,p) == ClampToQuantum(pixel.green)) && (GetPixelBlue(image,p) == ClampToQuantum(pixel.blue)) && (GetPixelAlpha(image,p) == ClampToQuantum(pixel.alpha)) && (length < 255) && (x < (ssize_t) (image->columns-1))) length++; else { if (x > 0) { WriteRunlengthPacket(image,pixel,length,p); if ((q-pixels+10) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } } length=0; } GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); } WriteRunlengthPacket(image,pixel,length,p); if ((q-pixels+10) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } case NoCompression: default: { /* Dump uncompressed DirectColor packets. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if ((image->alpha_trait != UndefinedPixelTrait) && (GetPixelAlpha(image,p) == (Quantum) TransparentAlpha)) { q=PopHexPixel(hex_digits,0xff,q); q=PopHexPixel(hex_digits,0xff,q); q=PopHexPixel(hex_digits,0xff,q); } else { q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelRed(image,p)),q); q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelGreen(image,p)),q); q=PopHexPixel(hex_digits,ScaleQuantumToChar( GetPixelBlue(image,p)),q); } if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } } (void) WriteBlobByte(image,'\n'); } else { /* Dump PseudoClass image. */ (void) FormatLocaleString(buffer,MagickPathExtent, "%.20g %.20g\n%d\n%d\n0\n",(double) image->columns,(double) image->rows,image->storage_class == PseudoClass ? 1 : 0, compression == RLECompression ? 1 : 0); (void) WriteBlobString(image,buffer); /* Dump number of colors and colormap. */ (void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double) image->colors); (void) WriteBlobString(image,buffer); for (i=0; i < (ssize_t) image->colors; i++) { (void) FormatLocaleString(buffer,MagickPathExtent,"%02X%02X%02X\n", ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red)), ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green)), ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue))); (void) WriteBlobString(image,buffer); } switch (compression) { case RLECompression: { /* Dump runlength-encoded PseudoColor packets. 
*/ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; index=GetPixelIndex(image,p); length=255; for (x=0; x < (ssize_t) image->columns; x++) { if ((index == GetPixelIndex(image,p)) && (length < 255) && (x < ((ssize_t) image->columns-1))) length++; else { if (x > 0) { q=PopHexPixel(hex_digits,(size_t) index,q); q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); i++; if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } } length=0; } index=GetPixelIndex(image,p); pixel.red=(MagickRealType) GetPixelRed(image,p); pixel.green=(MagickRealType) GetPixelGreen(image,p); pixel.blue=(MagickRealType) GetPixelBlue(image,p); pixel.alpha=(MagickRealType) GetPixelAlpha(image,p); p+=GetPixelChannels(image); } q=PopHexPixel(hex_digits,(size_t) index,q); q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); if ((q-pixels+6) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } case NoCompression: default: { /* Dump uncompressed PseudoColor packets. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { q=PopHexPixel(hex_digits,(size_t) GetPixelIndex(image,p),q); if ((q-pixels+4) >= 80) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); q=pixels; } p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } if (q != pixels) { *q++='\n'; (void) WriteBlob(image,q-pixels,pixels); } break; } } (void) WriteBlobByte(image,'\n'); } if (LocaleCompare(image_info->magick,"PS") != 0) (void) WriteBlobString(image,"end\n"); (void) WriteBlobString(image,"%%PageTrailer\n"); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) WriteBlobString(image,"%%Trailer\n"); if (page > 2) { (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5), ceil(bounds.y1-0.5),floor(bounds.x2-0.5),floor(bounds.y2-0.5)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,bounds.x2, bounds.y2); (void) WriteBlobString(image,buffer); } (void) WriteBlobString(image,"%%EOF\n"); (void) CloseBlob(image); return(MagickTrue); }
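/*
  Editorial note (not part of the original coder): the block below is a
  minimal, self-contained sketch illustrating the run-length hex packet
  format that WritePSImage() emits for RLE-compressed PseudoClass data.
  Each packet is the pixel value as two hex characters followed by a run
  count (run length minus one, capped at 255) as two more hex characters,
  with output lines kept under 80 characters so the PostScript stream stays
  readable.  The helper name EncodeRLEHexPackets() is illustrative only and
  does not exist in ImageMagick; this is a sketch of the encoding idea, not
  the coder's implementation.
*/
#include <stdio.h>

static void EncodeRLEHexPackets(const unsigned char *pixels,size_t count,
  FILE *file)
{
  size_t
    column = 0,
    i = 0;

  while (i < count)
  {
    size_t
      run = 1;

    /* Extend the run while the pixel repeats; the count byte holds run-1,
       so a single packet covers at most 256 identical pixels. */
    while (((i+run) < count) && (pixels[i+run] == pixels[i]) && (run < 256))
      run++;
    /* Emit <value><run-1> as four hex characters. */
    (void) fprintf(file,"%02X%02X",(unsigned int) pixels[i],
      (unsigned int) (run-1));
    column+=4;
    if (column >= 76)
      {
        (void) fputc('\n',file);
        column=0;
      }
    i+=run;
  }
  if (column != 0)
    (void) fputc('\n',file);
}

int main(void)
{
  /* A short scanline: five pixels of index 7 followed by two of index 3;
     the encoder prints "07040301". */
  const unsigned char pixels[] = { 7, 7, 7, 7, 7, 3, 3 };

  EncodeRLEHexPackets(pixels,sizeof(pixels),stdout);
  return 0;
}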
./CrossVul/dataset_final_sorted/CWE-399/c/good_909_0
crossvul-cpp_data_good_3486_4
/* * linux/arch/arm/kernel/ptrace.c * * By Ross Biro 1/23/92 * edited by Linus Torvalds * ARM modifications Copyright (C) 2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/regset.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/traps.h> #define REG_PC 15 #define REG_PSR 16 /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ #if 0 /* * Breakpoint SWI instruction: SWI &9F0001 */ #define BREAKINST_ARM 0xef9f0001 #define BREAKINST_THUMB 0xdf00 /* fill this in later */ #else /* * New breakpoints - use an undefined instruction. The ARM architecture * reference manual guarantees that the following instruction space * will produce an undefined instruction exception on all CPUs: * * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx * Thumb: 1101 1110 xxxx xxxx */ #define BREAKINST_ARM 0xe7f001f0 #define BREAKINST_THUMB 0xde01 #endif struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) \ {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(r0), REG_OFFSET_NAME(r1), REG_OFFSET_NAME(r2), REG_OFFSET_NAME(r3), REG_OFFSET_NAME(r4), REG_OFFSET_NAME(r5), REG_OFFSET_NAME(r6), REG_OFFSET_NAME(r7), REG_OFFSET_NAME(r8), REG_OFFSET_NAME(r9), REG_OFFSET_NAME(r10), REG_OFFSET_NAME(fp), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(lr), REG_OFFSET_NAME(pc), REG_OFFSET_NAME(cpsr), REG_OFFSET_NAME(ORIG_r0), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } /** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false. 
*/ bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); } /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. */ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) return *addr; else return 0; } /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline long get_user_reg(struct task_struct *task, int offset) { return task_pt_regs(task)->uregs[offset]; } /* * this routine will put a word on the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline int put_user_reg(struct task_struct *task, int offset, long data) { struct pt_regs newregs, *regs = task_pt_regs(task); int ret = -EINVAL; newregs = *regs; newregs.uregs[offset] = data; if (valid_user_regs(&newregs)) { regs->uregs[offset] = data; ret = 0; } return ret; } /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { /* Nothing to do. */ } /* * Handle hitting a breakpoint. */ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) { siginfo_t info; info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_addr = (void __user *)instruction_pointer(regs); force_sig_info(SIGTRAP, &info, tsk); } static int break_trap(struct pt_regs *regs, unsigned int instr) { ptrace_break(current, regs); return 0; } static struct undef_hook arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = 0x07f001f0, .cpsr_mask = PSR_T_BIT, .cpsr_val = 0, .fn = break_trap, }; static struct undef_hook thumb_break_hook = { .instr_mask = 0xffff, .instr_val = 0xde01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, }; static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) { unsigned int instr2; void __user *pc; /* Check the second half of the instruction. */ pc = (void __user *)(instruction_pointer(regs) + 2); if (processor_mode(regs) == SVC_MODE) { instr2 = *(u16 *) pc; } else { get_user(instr2, (u16 __user *)pc); } if (instr2 == 0xa000) { ptrace_break(current, regs); return 0; } else { return 1; } } static struct undef_hook thumb2_break_hook = { .instr_mask = 0xffff, .instr_val = 0xf7f0, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = thumb2_break_trap, }; static int __init ptrace_break_init(void) { register_undef_hook(&arm_break_hook); register_undef_hook(&thumb_break_hook); register_undef_hook(&thumb2_break_hook); return 0; } core_initcall(ptrace_break_init); /* * Read the word at offset "off" into the "struct user". We * actually access the pt_regs stored on the kernel stack. 
*/ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, unsigned long __user *ret) { unsigned long tmp; if (off & 3 || off >= sizeof(struct user)) return -EIO; tmp = 0; if (off == PT_TEXT_ADDR) tmp = tsk->mm->start_code; else if (off == PT_DATA_ADDR) tmp = tsk->mm->start_data; else if (off == PT_TEXT_END_ADDR) tmp = tsk->mm->end_code; else if (off < sizeof(struct pt_regs)) tmp = get_user_reg(tsk, off >> 2); return put_user(tmp, ret); } /* * Write the word at offset "off" into "struct user". We * actually access the pt_regs stored on the kernel stack. */ static int ptrace_write_user(struct task_struct *tsk, unsigned long off, unsigned long val) { if (off & 3 || off >= sizeof(struct user)) return -EIO; if (off >= sizeof(struct pt_regs)) return 0; return put_user_reg(tsk, off >> 2, val); } #ifdef CONFIG_IWMMXT /* * Get the child iWMMXt state. */ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) return -ENODATA; iwmmxt_task_disable(thread); /* force it to ram */ return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE) ? -EFAULT : 0; } /* * Set the child iWMMXt state. */ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) return -EACCES; iwmmxt_task_release(thread); /* force a reload */ return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE) ? -EFAULT : 0; } #endif #ifdef CONFIG_CRUNCH /* * Get the child Crunch state. */ static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); crunch_task_disable(thread); /* force it to ram */ return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) ? -EFAULT : 0; } /* * Set the child Crunch state. */ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); crunch_task_release(thread); /* force a reload */ return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) ? -EFAULT : 0; } #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * Convert a virtual register number into an index for a thread_info * breakpoint array. Breakpoints are identified using positive numbers * whilst watchpoints are negative. The registers are laid out as pairs * of (address, control), each pair mapping to a unique hw_breakpoint struct. * Register 0 is reserved for describing resource information. */ static int ptrace_hbp_num_to_idx(long num) { if (num < 0) num = (ARM_MAX_BRP << 1) - num; return (num - 1) >> 1; } /* * Returns the virtual register number for the address of the * breakpoint at index idx. */ static long ptrace_hbp_idx_to_num(int idx) { long mid = ARM_MAX_BRP << 1; long num = (idx << 1) + 1; return num > mid ? mid - num : num; } /* * Handle hitting a HW-breakpoint. */ static void ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i; siginfo_t info; for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break; num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); info.si_signo = SIGTRAP; info.si_errno = (int)num; info.si_code = TRAP_HWBKPT; info.si_addr = (void __user *)(bkpt->trigger); force_sig_info(SIGTRAP, &info, current); } /* * Set ptrace breakpoint pointers to zero for this task. 
* This is required in order to prevent child processes from unregistering * breakpoints held by their parent. */ void clear_ptrace_hw_breakpoint(struct task_struct *tsk) { memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); } /* * Unregister breakpoints from this task and reset the pointers in * the thread_struct. */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) { if (t->debug.hbp[i]) { unregister_hw_breakpoint(t->debug.hbp[i]); t->debug.hbp[i] = NULL; } } } static u32 ptrace_get_hbp_resource_info(void) { u8 num_brps, num_wrps, debug_arch, wp_len; u32 reg = 0; num_brps = hw_breakpoint_slots(TYPE_INST); num_wrps = hw_breakpoint_slots(TYPE_DATA); debug_arch = arch_get_debug_arch(); wp_len = arch_get_max_wp_len(); reg |= debug_arch; reg <<= 8; reg |= wp_len; reg <<= 8; reg |= num_wrps; reg <<= 8; reg |= num_brps; return reg; } static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) { struct perf_event_attr attr; ptrace_breakpoint_init(&attr); /* Initialise fields to sane defaults. */ attr.bp_addr = 0; attr.bp_len = HW_BREAKPOINT_LEN_4; attr.bp_type = type; attr.disabled = 1; return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk); } static int ptrace_gethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { u32 reg; int idx, ret = 0; struct perf_event *bp; struct arch_hw_breakpoint_ctrl arch_ctrl; if (num == 0) { reg = ptrace_get_hbp_resource_info(); } else { idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { reg = 0; goto put; } arch_ctrl = counter_arch_bp(bp)->ctrl; /* * Fix up the len because we may have adjusted it * to compensate for an unaligned address. 
*/ while (!(arch_ctrl.len & 0x1)) arch_ctrl.len >>= 1; if (num & 0x1) reg = bp->attr.bp_addr; else reg = encode_ctrl_reg(arch_ctrl); } put: if (put_user(reg, data)) ret = -EFAULT; out: return ret; } static int ptrace_sethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { int idx, gen_len, gen_type, implied_type, ret = 0; u32 user_val; struct perf_event *bp; struct arch_hw_breakpoint_ctrl ctrl; struct perf_event_attr attr; if (num == 0) goto out; else if (num < 0) implied_type = HW_BREAKPOINT_RW; else implied_type = HW_BREAKPOINT_X; idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } if (get_user(user_val, data)) { ret = -EFAULT; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { bp = ptrace_hbp_create(tsk, implied_type); if (IS_ERR(bp)) { ret = PTR_ERR(bp); goto out; } tsk->thread.debug.hbp[idx] = bp; } attr = bp->attr; if (num & 0x1) { /* Address */ attr.bp_addr = user_val; } else { /* Control */ decode_ctrl_reg(user_val, &ctrl); ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); if (ret) goto out; if ((gen_type & implied_type) != gen_type) { ret = -EINVAL; goto out; } attr.bp_len = gen_len; attr.bp_type = gen_type; attr.disabled = !ctrl.enabled; } ret = modify_user_hw_breakpoint(bp, &attr); out: return ret; } #endif /* regset get/set implementations */ static int gpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, sizeof(*regs)); } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct pt_regs newregs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, sizeof(newregs)); if (ret) return ret; if (!valid_user_regs(&newregs)) return -EINVAL; *task_pt_regs(target) = newregs; return 0; } static int fpa_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &task_thread_info(target)->fpstate, 0, sizeof(struct user_fp)); } static int fpa_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct thread_info *thread = task_thread_info(target); thread->used_cp[1] = thread->used_cp[2] = 1; return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &thread->fpstate, 0, sizeof(struct user_fp)); } #ifdef CONFIG_VFP /* * VFP register get/set implementations. * * With respect to the kernel, struct user_fp is divided into three chunks: * 16 or 32 real VFP registers (d0-d15 or d0-31) * These are transferred to/from the real registers in the task's * vfp_hard_struct. The number of registers depends on the kernel * configuration. * * 16 or 0 fake VFP registers (d16-d31 or empty) * i.e., the user_vfp structure has space for 32 registers even if * the kernel doesn't have them all. * * vfp_get() reads this chunk as zero where applicable * vfp_set() ignores this chunk * * 1 word for the FPSCR * * The bounds-checking logic built into user_regset_copyout and friends * means that we can make a simple sequence of calls to map the relevant data * to/from the specified slice of the user regset structure. 
*/ static int vfp_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vfp->fpregs, user_fpregs_offset, user_fpregs_offset + sizeof(vfp->fpregs)); if (ret) return ret; ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, user_fpregs_offset + sizeof(vfp->fpregs), user_fpscr_offset); if (ret) return ret; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vfp->fpscr, user_fpscr_offset, user_fpscr_offset + sizeof(vfp->fpscr)); } /* * For vfp_set() a read-modify-write is done on the VFP registers, * in order to avoid writing back a half-modified set of registers on * failure. */ static int vfp_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct new_vfp = thread->vfpstate.hard; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, user_fpregs_offset + sizeof(new_vfp.fpregs)); if (ret) return ret; ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, user_fpregs_offset + sizeof(new_vfp.fpregs), user_fpscr_offset); if (ret) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpscr, user_fpscr_offset, user_fpscr_offset + sizeof(new_vfp.fpscr)); if (ret) return ret; vfp_sync_hwstate(thread); thread->vfpstate.hard = new_vfp; vfp_flush_hwstate(thread); return 0; } #endif /* CONFIG_VFP */ enum arm_regset { REGSET_GPR, REGSET_FPR, #ifdef CONFIG_VFP REGSET_VFP, #endif }; static const struct user_regset arm_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(u32), .align = sizeof(u32), .get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { /* * For the FPA regs in fpstate, the real fields are a mixture * of sizes, so pretend that the registers are word-sized: */ .core_note_type = NT_PRFPREG, .n = sizeof(struct user_fp) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = fpa_get, .set = fpa_set }, #ifdef CONFIG_VFP [REGSET_VFP] = { /* * Pretend that the VFP regs are word-sized, since the FPSCR is * a single word dangling at the end of struct user_vfp: */ .core_note_type = NT_ARM_VFP, .n = ARM_VFPREGS_SIZE / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = vfp_get, .set = vfp_set }, #endif /* CONFIG_VFP */ }; static const struct user_regset_view user_arm_view = { .name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_arm_view; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *) data; switch (request) { case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, datap); break; case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; case PTRACE_GETREGS: ret = 
copy_regset_to_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_SETREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_GETFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; case PTRACE_SETFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; #ifdef CONFIG_IWMMXT case PTRACE_GETWMMXREGS: ret = ptrace_getwmmxregs(child, datap); break; case PTRACE_SETWMMXREGS: ret = ptrace_setwmmxregs(child, datap); break; #endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value, datap); break; case PTRACE_SET_SYSCALL: task_thread_info(child)->syscall = data; ret = 0; break; #ifdef CONFIG_CRUNCH case PTRACE_GETCRUNCHREGS: ret = ptrace_getcrunchregs(child, datap); break; case PTRACE_SETCRUNCHREGS: ret = ptrace_setcrunchregs(child, datap); break; #endif #ifdef CONFIG_VFP case PTRACE_GETVFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; case PTRACE_SETVFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: if (ptrace_get_breakpoints(child) < 0) return -ESRCH; ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: if (ptrace_get_breakpoints(child) < 0) return -ESRCH; ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); ptrace_put_breakpoints(child); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; if (!test_thread_flag(TIF_SYSCALL_TRACE)) return scno; if (!(current->ptrace & PT_PTRACED)) return scno; /* * Save IP. IP is used to denote syscall entry/exit: * IP = 0 -> entry, = 1 -> exit */ ip = regs->ARM_ip; regs->ARM_ip = why; current_thread_info()->syscall = scno; /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } regs->ARM_ip = ip; return current_thread_info()->syscall; }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_4
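The arch_ptrace()/ptrace_read_user() code in the entry above services PTRACE_PEEKUSR by treating the user-supplied offset as a word index into the saved struct pt_regs (r0 at offset 0, pc at REG_PC * 4). The stand-alone sketch below is only a hedged illustration of that interface, not part of the kernel file: it assumes an ARM userland, uses glibc's PTRACE_PEEKUSER spelling of the same request, and hard-codes the pc slot at offset 15 * 4 as defined above.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {                         /* child: request tracing, then stop */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                _exit(0);
        }

        waitpid(pid, NULL, 0);                  /* wait for the SIGSTOP notification */

        /* Word offsets into struct pt_regs: r0 lives at 0, pc at 15 * 4. */
        long r0 = ptrace(PTRACE_PEEKUSER, pid, (void *)(0 * 4), NULL);
        long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)(15 * 4), NULL);

        printf("stopped child: r0=%#lx pc=%#lx\n", r0, pc);

        kill(pid, SIGKILL);                     /* clean up the traced child */
        waitpid(pid, NULL, 0);
        return 0;
}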
crossvul-cpp_data_good_3486_21
/* * arch/sh/math-emu/math.c * * Copyright (C) 2006 Takashi YOSHII <takasi-y@ops.dti.ne.jp> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/perf_event.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <asm/io.h> #include "sfp-util.h" #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #define FPUL (fregs->fpul) #define FPSCR (fregs->fpscr) #define FPSCR_RM (FPSCR&3) #define FPSCR_DN ((FPSCR>>18)&1) #define FPSCR_PR ((FPSCR>>19)&1) #define FPSCR_SZ ((FPSCR>>20)&1) #define FPSCR_FR ((FPSCR>>21)&1) #define FPSCR_MASK 0x003fffffUL #define BANK(n) (n^(FPSCR_FR?16:0)) #define FR ((unsigned long*)(fregs->fp_regs)) #define FR0 (FR[BANK(0)]) #define FRn (FR[BANK(n)]) #define FRm (FR[BANK(m)]) #define DR ((unsigned long long*)(fregs->fp_regs)) #define DRn (DR[BANK(n)/2]) #define DRm (DR[BANK(m)/2]) #define XREG(n) (n^16) #define XFn (FR[BANK(XREG(n))]) #define XFm (FR[BANK(XREG(m))]) #define XDn (DR[BANK(XREG(n))/2]) #define XDm (DR[BANK(XREG(m))/2]) #define R0 (regs->regs[0]) #define Rn (regs->regs[n]) #define Rm (regs->regs[m]) #define WRITE(d,a) ({if(put_user(d, (typeof (d)*)a)) return -EFAULT;}) #define READ(d,a) ({if(get_user(d, (typeof (d)*)a)) return -EFAULT;}) #define PACK_S(r,f) FP_PACK_SP(&r,f) #define UNPACK_S(f,r) FP_UNPACK_SP(f,&r) #define PACK_D(r,f) \ {u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];} #define UNPACK_D(f,r) \ {u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);} // 2 args instructions. 
#define BOTH_PRmn(op,x) \ FP_DECL_EX; if(FPSCR_PR) op(D,x,DRm,DRn); else op(S,x,FRm,FRn); #define CMP_X(SZ,R,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_CMP_##SZ(R, Fn, Fm, 2); }while(0) #define EQ_X(SZ,R,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_CMP_EQ_##SZ(R, Fn, Fm); }while(0) #define CMP(OP) ({ int r; BOTH_PRmn(OP##_X,r); r; }) static int fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (CMP(CMP) > 0) regs->sr |= 1; else regs->sr &= ~1; return 0; } static int fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (CMP(CMP /*EQ*/) == 0) regs->sr |= 1; else regs->sr &= ~1; return 0; } #define ARITH_X(SZ,OP,M,N) do{ \ FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); FP_DECL_##SZ(Fr); \ UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \ FP_##OP##_##SZ(Fr, Fn, Fm); \ PACK_##SZ(N, Fr); }while(0) static int fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, ADD); return 0; } static int fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, SUB); return 0; } static int fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, MUL); return 0; } static int fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { BOTH_PRmn(ARITH_X, DIV); return 0; } static int fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { FP_DECL_EX; FP_DECL_S(Fr); FP_DECL_S(Ft); FP_DECL_S(F0); FP_DECL_S(Fm); FP_DECL_S(Fn); UNPACK_S(F0, FR0); UNPACK_S(Fm, FRm); UNPACK_S(Fn, FRn); FP_MUL_S(Ft, Fm, F0); FP_ADD_S(Fr, Fn, Ft); PACK_S(FRn, Fr); return 0; } // to process fmov's extension (odd n for DR access XD). #define FMOV_EXT(x) if(x&1) x+=16-1 static int fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + R0 + 4); n++; READ(FRn, Rm + R0); } else { READ(FRn, Rm + R0); } return 0; } static int fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + 4); n++; READ(FRn, Rm); } else { READ(FRn, Rm); } return 0; } static int fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(n); READ(FRn, Rm + 4); n++; READ(FRn, Rm); Rm += 8; } else { READ(FRn, Rm); Rm += 4; } return 0; } static int fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); WRITE(FRm, Rn + R0 + 4); m++; WRITE(FRm, Rn + R0); } else { WRITE(FRm, Rn + R0); } return 0; } static int fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); WRITE(FRm, Rn + 4); m++; WRITE(FRm, Rn); } else { WRITE(FRm, Rn); } return 0; } static int fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); Rn -= 8; WRITE(FRm, Rn + 4); m++; WRITE(FRm, Rn); } else { Rn -= 4; WRITE(FRm, Rn); } return 0; } static int fmov_reg_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { if (FPSCR_SZ) { FMOV_EXT(m); FMOV_EXT(n); DRn = DRm; } else { FRn = FRm; } return 0; } static int fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) { return -EINVAL; } // 1 arg instructions. 
#define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \ { printk( #i " not yet done.\n"); return 0; } NOTYETn(ftrv) NOTYETn(fsqrt) NOTYETn(fipr) NOTYETn(fsca) NOTYETn(fsrra) #define EMU_FLOAT_X(SZ,N) do { \ FP_DECL_##SZ(Fn); \ FP_FROM_INT_##SZ(Fn, FPUL, 32, int); \ PACK_##SZ(N, Fn); }while(0) static int ffloat(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; if (FPSCR_PR) EMU_FLOAT_X(D, DRn); else EMU_FLOAT_X(S, FRn); return 0; } #define EMU_FTRC_X(SZ,N) do { \ FP_DECL_##SZ(Fn); \ UNPACK_##SZ(Fn, N); \ FP_TO_INT_##SZ(FPUL, Fn, 32, 1); }while(0) static int ftrc(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; if (FPSCR_PR) EMU_FTRC_X(D, DRn); else EMU_FTRC_X(S, FRn); return 0; } static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; FP_DECL_S(Fn); FP_DECL_D(Fr); UNPACK_S(Fn, FPUL); FP_CONV(D, S, 2, 1, Fr, Fn); PACK_D(DRn, Fr); return 0; } static int fcnvds(struct sh_fpu_soft_struct *fregs, int n) { FP_DECL_EX; FP_DECL_D(Fn); FP_DECL_S(Fr); UNPACK_D(Fn, DRn); FP_CONV(S, D, 1, 2, Fr, Fn); PACK_S(FPUL, Fr); return 0; } static int fxchg(struct sh_fpu_soft_struct *fregs, int flag) { FPSCR ^= flag; return 0; } static int fsts(struct sh_fpu_soft_struct *fregs, int n) { FRn = FPUL; return 0; } static int flds(struct sh_fpu_soft_struct *fregs, int n) { FPUL = FRn; return 0; } static int fneg(struct sh_fpu_soft_struct *fregs, int n) { FRn ^= (1 << (_FP_W_TYPE_SIZE - 1)); return 0; } static int fabs(struct sh_fpu_soft_struct *fregs, int n) { FRn &= ~(1 << (_FP_W_TYPE_SIZE - 1)); return 0; } static int fld0(struct sh_fpu_soft_struct *fregs, int n) { FRn = 0; return 0; } static int fld1(struct sh_fpu_soft_struct *fregs, int n) { FRn = (_FP_EXPBIAS_S << (_FP_FRACBITS_S - 1)); return 0; } static int fnop_n(struct sh_fpu_soft_struct *fregs, int n) { return -EINVAL; } /// Instruction decoders. static int id_fxfd(struct sh_fpu_soft_struct *, int); static int id_fnxd(struct sh_fpu_soft_struct *, struct pt_regs *, int, int); static int (*fnxd[])(struct sh_fpu_soft_struct *, int) = { fsts, flds, ffloat, ftrc, fneg, fabs, fsqrt, fsrra, fld0, fld1, fcnvsd, fcnvds, fnop_n, fnop_n, fipr, id_fxfd }; static int (*fnmx[])(struct sh_fpu_soft_struct *, struct pt_regs *, int, int) = { fadd, fsub, fmul, fdiv, fcmp_eq, fcmp_gt, fmov_idx_reg, fmov_reg_idx, fmov_mem_reg, fmov_inc_reg, fmov_reg_mem, fmov_reg_dec, fmov_reg_reg, id_fnxd, fmac, fnop_mn}; static int id_fxfd(struct sh_fpu_soft_struct *fregs, int x) { const int flag[] = { FPSCR_SZ, FPSCR_PR, FPSCR_FR, 0 }; switch (x & 3) { case 3: fxchg(fregs, flag[x >> 2]); break; case 1: ftrv(fregs, x - 1); break; default: fsca(fregs, x); } return 0; } static int id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n) { return (fnxd[x])(fregs, n); } static int id_fnmx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code) { int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf; return (fnmx[x])(fregs, regs, m, n); } static int id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code) { int n = ((code >> 8) & 0xf); unsigned long *reg = (code & 0x0010) ? 
&FPUL : &FPSCR; switch (code & 0xf0ff) { case 0x005a: case 0x006a: Rn = *reg; break; case 0x405a: case 0x406a: *reg = Rn; break; case 0x4052: case 0x4062: Rn -= 4; WRITE(*reg, Rn); break; case 0x4056: case 0x4066: READ(*reg, Rn); Rn += 4; break; default: return -EINVAL; } return 0; } static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs) { if ((code & 0xf000) == 0xf000) return id_fnmx(fregs, regs, code); else return id_sys(fregs, regs, code); } /** * denormal_to_double - Given denormalized float number, * store double float * * @fpu: Pointer to sh_fpu_soft structure * @n: Index to FP register */ static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n) { unsigned long du, dl; unsigned long x = fpu->fpul; int exp = 1023 - 126; if (x != 0 && (x & 0x7f800000) == 0) { du = (x & 0x80000000); while ((x & 0x00800000) == 0) { x <<= 1; exp--; } x &= 0x007fffff; du |= (exp << 20) | (x >> 3); dl = x << 29; fpu->fp_regs[n] = du; fpu->fp_regs[n+1] = dl; } } /** * ieee_fpe_handler - Handle denormalized number exception * * @regs: Pointer to register structure * * Returns 1 when it's handled (should not cause exception). */ static int ieee_fpe_handler(struct pt_regs *regs) { unsigned short insn = *(unsigned short *)regs->pc; unsigned short finsn; unsigned long nextpc; siginfo_t info; int nib[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf}; if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ regs->pr = regs->pc + 4; if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ if (regs->sr & 1) nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); else nextpc = regs->pc + 4; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ if (regs->sr & 1) nextpc = regs->pc + 4; else nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x4 && nib[3] == 0xb && (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ nextpc = regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (nib[0] == 0x0 && nib[3] == 0x3 && (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ nextpc = regs->pc + 4 + regs->regs[nib[1]]; finsn = *(unsigned short *) (regs->pc + 2); } else if (insn == 0x000b) { /* rts */ nextpc = regs->pr; finsn = *(unsigned short *) (regs->pc + 2); } else { nextpc = regs->pc + 2; finsn = insn; } if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ struct task_struct *tsk = current; if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) { /* FPU error */ denormal_to_double (&tsk->thread.xstate->softfpu, (finsn >> 8) & 0xf); tsk->thread.xstate->softfpu.fpscr &= ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); task_thread_info(tsk)->status |= TS_USEDFPU; } else { info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = FPE_FLTINV; info.si_addr = (void __user *)regs->pc; force_sig_info(SIGFPE, &info, tsk); } regs->pc = nextpc; return 1; } return 0; } asmlinkage void do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs regs) { struct task_struct *tsk = current; siginfo_t info; if (ieee_fpe_handler (&regs)) return; regs.pc += 2; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = FPE_FLTINV; info.si_addr = (void __user *)regs.pc; force_sig_info(SIGFPE, &info, tsk); } /** * fpu_init - Initialize FPU 
registers * @fpu: Pointer to software emulated FPU registers. */ static void fpu_init(struct sh_fpu_soft_struct *fpu) { int i; fpu->fpscr = FPSCR_INIT; fpu->fpul = 0; for (i = 0; i < 16; i++) { fpu->fp_regs[i] = 0; fpu->xfp_regs[i]= 0; } } /** * do_fpu_inst - Handle reserved instructions for FPU emulation * @inst: instruction code. * @regs: registers on stack. */ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) { struct task_struct *tsk = current; struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { /* initialize once. */ fpu_init(fpu); task_thread_info(tsk)->status |= TS_USEDFPU; } return fpu_emulate(inst, fpu, regs); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_21
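denormal_to_double() in the SH math emulator above widens a denormalized single-precision value into the two 32-bit halves of an IEEE-754 double by renormalizing the fraction and rebiasing the exponent. The user-space sketch below is a minimal illustration of that same bit manipulation, offered only as an assumption for demonstration; it folds the kernel's du/dl pair into one 64-bit word and checks the hand-built pattern against the compiler's own float-to-double conversion.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same renormalization as denormal_to_double(), but producing a single u64. */
static uint64_t widen_denormal(uint32_t x)
{
        uint64_t bits;
        int exp = 1023 - 126;                   /* rebias: single -> double */

        if (x == 0 || (x & 0x7f800000u) != 0)   /* same guard as the kernel code */
                return 0;                       /* only nonzero denormals handled */

        bits = (uint64_t)(x & 0x80000000u) << 32;       /* sign bit */
        while ((x & 0x00800000u) == 0) {                /* renormalize the fraction */
                x <<= 1;
                exp--;
        }
        x &= 0x007fffffu;                               /* drop the implicit bit */
        return bits | ((uint64_t)exp << 52) | ((uint64_t)x << 29);
}

int main(void)
{
        uint32_t s = 0x00000001u;       /* smallest positive single denormal */
        uint64_t manual = widen_denormal(s), reference;
        float f;
        double d;

        memcpy(&f, &s, sizeof f);
        d = (double)f;
        memcpy(&reference, &d, sizeof reference);

        printf("manual=%016llx compiler=%016llx\n",
               (unsigned long long)manual, (unsigned long long)reference);
        return manual == reference ? 0 : 1;
}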
crossvul-cpp_data_good_2295_1
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.104 2014/10/17 15:49:00 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int, int); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 256 #define MAX_SHNUM 1024 private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s header sections (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, 
value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x01 #define FLAGS_DID_NOTE 0x02 #define FLAGS_DID_BUILD_ID 0x04 #define FLAGS_DID_CORE_STYLE 0x08 #define FLAGS_IS_CORE 0x10 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; if (xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. */ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; #ifdef ELFCORE int os_style = -1; #endif uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. 
*/ return (offset >= size) ? offset : size; } if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) == (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) goto core; if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && xnh_type == NT_GNU_VERSION && descsz == 2) { file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); if (file_printf(ms, ", for GNU/") == -1) return size; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return size; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return size; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return size; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return size; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return size; break; default: if (file_printf(ms, "<unknown>") == -1) return size; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" : "sha1") == -1) return size; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return size; *flags |= FLAGS_DID_BUILD_ID; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && xnh_type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return size; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? 
"," : "", pax[i]) == -1) return size; } } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: if (descsz == 4) { do_note_netbsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } break; case NT_NETBSD_MARCH: if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) { do_note_freebsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && xnh_type == NT_OPENBSD_VERSION && descsz == 4) { if (file_printf(ms, ", for OpenBSD") == -1) return size; /* Content of note is always 0 */ *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; if (file_printf(ms, ", for DragonFly") == -1) return size; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } core: /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } #ifdef ELFCORE if ((*flags & FLAGS_DID_CORE) != 0) return size; if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return size; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (xnh_type == NT_NETBSD_CORE_PROCINFO) { uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", &nbuf[doff + 0x7c]) == -1) return size; /* * Extract the signal number. It is at * offset 0x08. */ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return size; *flags |= FLAGS_DID_CORE; return size; } break; default: if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. 
*/ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS ; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return size; *flags |= FLAGS_DID_CORE; return size; tryanother: ; } } break; } #endif return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int mach, int strtab) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. 
*/ if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) { file_badread(ms); return -1; } name[sizeof(name) - 1] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? 
", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int sh_num) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *shared_libraries = ""; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_INTERP: shared_libraries = " (uses shared libs)"; break; default: if (xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_NOTE: if ((align = xph_align) & 0x80000000UL) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } if (sh_num) break; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked%s", linking_style, shared_libraries) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } fsize = st.st_size; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/good_2295_1
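The note-parsing loops in the readelf.c code above (the SHT_NOTE and PT_NOTE cases) repeatedly call donote() with a 4-byte alignment until the offset it returns runs off the end of the buffer. As a minimal, self-contained sketch of how an ELF note record (namesz, descsz, type, then padded name and descriptor) is walked, the following may help; the struct and function names here are illustrative and are not part of the original file.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative 32-bit note header layout (same fields as Elf32_Nhdr). */
struct note_hdr {
	uint32_t n_namesz;
	uint32_t n_descsz;
	uint32_t n_type;
};

#define NOTE_ALIGN(x) (((x) + 3) & ~(uint32_t)3)   /* pad to 4 bytes */

/* Walk every note in buf[0..size) and print its owner name and type. */
static void walk_notes(const unsigned char *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct note_hdr) <= size) {
		struct note_hdr nh;
		memcpy(&nh, buf + off, sizeof(nh));
		off += sizeof(nh);

		size_t name_off = off;
		size_t desc_off = NOTE_ALIGN(name_off + nh.n_namesz);
		size_t next_off = NOTE_ALIGN(desc_off + nh.n_descsz);

		if (desc_off > size || next_off > size)
			break;          /* truncated or malformed note */

		printf("note owner=%.*s type=%u descsz=%u\n",
		    (int)nh.n_namesz, buf + name_off,
		    nh.n_type, nh.n_descsz);
		off = next_off;
	}
}

Unlike the real parser, this sketch ignores byte order and the 64-bit header variant; it only shows the alignment and bounds arithmetic that the size checks in donote() are protecting.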
crossvul-cpp_data_good_3453_0
#include <config.h> #include "ftpd.h" #include "dynamic.h" #include "ftpwho-update.h" #include "globals.h" #include "messages.h" #ifdef WITH_DIRALIASES # include "diraliases.h" #endif #ifdef WITH_TLS # include "tls.h" #endif #ifdef WITH_DMALLOC # include <dmalloc.h> #endif static void antiidle(void) { if (noopidle == (time_t) -1) { noopidle = time(NULL); } else { if ((time(NULL) - noopidle) > (time_t) idletime_noop) { die(421, LOG_INFO, MSG_TIMEOUT_NOOP, (unsigned long) idletime_noop); } } } /* * Introduce a random delay, to avoid guessing existing user names by * mesuring delay. It's especially true when LDAP is used. * No need to call usleep2() because we are root at this point. */ static void randomdelay(void) { usleep(rand() % 15000UL); /* dummy... no need for arc4 */ } /* * Simple but fast command-line reader. We break the FTP protocol here, * because we deny access to files with strange characters in their name. * Now, I seriously doubt that clients should be allowed to upload files * with carriage returns, bells, cursor moves and other fancy stuff in the * names. It can indirectly lead to security flaws with scripts, it's * annoying for the sysadmin, it can be a client error, it can bring unexpected * results on some filesystems, etc. So control chars are replaced by "_". * Better be safe than 100% RFC crap compliant but unsafe. If you really want * RFC compliance, define RFC_CONFORMANT_PARSER. But I will hate you. * * RFC_CONFORMANT_LINES is another thing that clients should implement * properly (and it's trivial to do) : lines must be ended with \r\n . * Guess what ? * Some broken clients are just sending \n ... Grrrrrrrrrrrr !!!!!!!!!!!!!!! * * -Frank. */ static size_t scanned; static size_t readnbd; static void flush_cmd(void) { scanned = readnbd = (size_t) 0U; } int sfgets(void) { struct pollfd pfd; int pollret; ssize_t readnb; signed char seen_r = 0; if (scanned > (size_t) 0U) { /* support pipelining */ readnbd -= scanned; memmove(cmd, cmd + scanned, readnbd); /* safe */ scanned = (size_t) 0U; } pfd.fd = clientfd; #ifdef __APPLE_CC__ pfd.events = POLLIN | POLLERR | POLLHUP; #else pfd.events = POLLIN | POLLPRI | POLLERR | POLLHUP; #endif while (scanned < cmdsize) { if (scanned >= readnbd) { /* nothing left in the buffer */ pfd.revents = 0; while ((pollret = poll(&pfd, 1U, idletime * 1000UL)) < 0 && errno == EINTR); if (pollret == 0) { return -1; } if (pollret <= 0 || (pfd.revents & (POLLERR | POLLHUP | POLLNVAL)) != 0) { return -2; } if ((pfd.revents & (POLLIN | POLLPRI)) == 0) { continue; } if (readnbd >= cmdsize) { break; } #ifdef WITH_TLS if (tls_cnx != NULL) { while ((readnb = SSL_read (tls_cnx, cmd + readnbd, cmdsize - readnbd)) < (ssize_t) 0 && errno == EINTR); } else #endif { while ((readnb = read(clientfd, cmd + readnbd, cmdsize - readnbd)) < (ssize_t) 0 && errno == EINTR); } if (readnb <= (ssize_t) 0) { return -2; } readnbd += readnb; if (readnbd > cmdsize) { return -2; } } #ifdef RFC_CONFORMANT_LINES if (seen_r != 0) { #endif if (cmd[scanned] == '\n') { #ifndef RFC_CONFORMANT_LINES if (seen_r != 0) { #endif cmd[scanned - 1U] = 0; #ifndef RFC_CONFORMANT_LINES } else { cmd[scanned] = 0; } #endif if (++scanned >= readnbd) { /* non-pipelined command */ scanned = readnbd = (size_t) 0U; } return 0; } seen_r = 0; #ifdef RFC_CONFORMANT_LINES } #endif if (ISCTRLCODE(cmd[scanned])) { if (cmd[scanned] == '\r') { seen_r = 1; } #ifdef RFC_CONFORMANT_PARSER /* disabled by default, intentionnaly */ else if (cmd[scanned] == 0) { cmd[scanned] = '\n'; } #else /* replace control chars 
with _ */ cmd[scanned] = '_'; #endif } scanned++; } die(421, LOG_WARNING, MSG_LINE_TOO_LONG); /* don't remove this */ return 0; /* to please GCC */ } /* Replace extra spaces before and after a string with '_' */ #ifdef MINIMAL # define revealextraspc(X) (X) #else static char *revealextraspc(char * const s_) { unsigned char *s = (unsigned char *) s_; unsigned char *sn; if (s == NULL) { return s_; } simplify(s_); while (*s != 0U && isspace(*s)) { *s++ = '_'; } if (*s == 0U) { return s_; } sn = s; do { sn++; } while (*sn != 0U); do { sn--; if (!isspace(*sn)) { break; } *sn = '_'; } while (sn != s); return s_; } #endif #ifdef WITH_RFC2640 char *charset_client2fs(const char * const string) { char *output = NULL, *output_; size_t inlen, outlen, outlen_; inlen = strlen(string); outlen_ = outlen = inlen * (size_t) 4U + (size_t) 1U; if (outlen <= inlen || (output_ = output = calloc(outlen, (size_t) 1U)) == NULL) { die_mem(); } if (utf8 > 0 && strcasecmp(charset_fs, "utf-8") != 0) { if (iconv(iconv_fd_utf82fs, (char **) &string, &inlen, &output_, &outlen_) == (size_t) -1) { strncpy(output, string, outlen); } } else if (utf8 <= 0 && strcasecmp(charset_fs, charset_client) != 0) { if (iconv(iconv_fd_client2fs, (char **) &string, &inlen, &output_, &outlen_) == (size_t) -1) { strncpy(output, string, outlen); } } else { strncpy(output, string, outlen); } output[outlen - 1] = 0; return output; } #endif void parser(void) { char *arg; #ifndef MINIMAL char *sitearg; #endif #ifdef WITH_RFC2640 char *narg = NULL; #endif size_t n; #ifdef IMPLICIT_TLS (void) tls_init_new_session(); data_protection_level = CPL_PRIVATE; #endif for (;;) { xferfd = -1; if (state_needs_update != 0) { state_needs_update = 0; setprocessname("pure-ftpd (IDLE)"); #ifdef FTPWHO if (shm_data_cur != NULL) { ftpwho_lock(); shm_data_cur->state = FTPWHO_STATE_IDLE; *shm_data_cur->filename = 0; ftpwho_unlock(); } #endif } doreply(); alarm(idletime * 2); switch (sfgets()) { case -1: #ifdef BORING_MODE die(421, LOG_INFO, MSG_TIMEOUT); #else die(421, LOG_INFO, MSG_TIMEOUT_PARSER); #endif case -2: return; } #ifdef DEBUG if (debug != 0) { addreply(0, "%s", cmd); } #endif n = (size_t) 0U; while ((isalpha((unsigned char) cmd[n]) || cmd[n] == '@') && n < cmdsize) { cmd[n] = (char) tolower((unsigned char) cmd[n]); n++; } if (n >= cmdsize) { /* overparanoid, it should never happen */ die(421, LOG_WARNING, MSG_LINE_TOO_LONG); } if (n == (size_t) 0U) { nop: addreply_noformat(500, "?"); continue; } #ifdef SKIP_COMMAND_TRAILING_SPACES while (isspace((unsigned char) cmd[n]) && n < cmdsize) { cmd[n++] = 0; } arg = cmd + n; while (cmd[n] != 0 && n < cmdsize) { n++; } n--; while (isspace((unsigned char) cmd[n])) { cmd[n--] = 0; } #else if (cmd[n] == 0) { arg = cmd + n; } else if (isspace((unsigned char) cmd[n])) { cmd[n] = 0; arg = cmd + n + 1; } else { goto nop; } #endif if (logging != 0) { #ifdef DEBUG logfile(LOG_DEBUG, MSG_DEBUG_COMMAND " [%s] [%s]", cmd, arg); #else logfile(LOG_DEBUG, MSG_DEBUG_COMMAND " [%s] [%s]", cmd, strcmp(cmd, "pass") ? arg : "<*>"); #endif } #ifdef WITH_RFC2640 narg = charset_client2fs(arg); arg = narg; #endif /* * antiidle() is called with dummy commands, usually used by clients * who are wanting extra idle time. We give them some, but not too much. * When we jump to wayout, the idle timer is not zeroed. It means that * we didn't issue an 'active' command like RETR. 
*/ #ifndef MINIMAL if (!strcmp(cmd, "noop")) { antiidle(); donoop(); goto wayout; } #endif if (!strcmp(cmd, "user")) { #ifdef WITH_TLS if (enforce_tls_auth > 1 && tls_cnx == NULL) { die(421, LOG_WARNING, MSG_TLS_NEEDED); } #endif douser(arg); } else if (!strcmp(cmd, "acct")) { addreply(202, MSG_WHOAREYOU); } else if (!strcmp(cmd, "pass")) { if (guest == 0) { randomdelay(); } dopass(arg); } else if (!strcmp(cmd, "quit")) { addreply(221, MSG_GOODBYE, (unsigned long long) ((uploaded + 1023ULL) / 1024ULL), (unsigned long long) ((downloaded + 1023ULL) / 1024ULL)); return; } else if (!strcmp(cmd, "syst")) { antiidle(); addreply_noformat(215, "UNIX Type: L8"); goto wayout; #ifdef WITH_TLS } else if (enforce_tls_auth > 0 && !strcmp(cmd, "auth") && !strcasecmp(arg, "tls")) { addreply_noformat(234, "AUTH TLS OK."); doreply(); if (tls_cnx == NULL) { flush_cmd(); (void) tls_init_new_session(); } goto wayout; } else if (!strcmp(cmd, "pbsz")) { addreply_noformat(tls_cnx == NULL ? 503 : 200, "PBSZ=0"); } else if (!strcmp(cmd, "prot")) { if (tls_cnx == NULL) { addreply_noformat(503, MSG_PROT_BEFORE_PBSZ); goto wayout; } switch (*arg) { case 0: addreply_noformat(503, MSG_MISSING_ARG); data_protection_level = CPL_NONE; break; case 'C': if (arg[1] == 0) { addreply(200, MSG_PROT_OK, "clear"); data_protection_level = CPL_CLEAR; break; } case 'S': case 'E': if (arg[1] == 0) { addreply(200, MSG_PROT_UNKNOWN_LEVEL, arg, "private"); data_protection_level = CPL_PRIVATE; break; } case 'P': if (arg[1] == 0) { addreply(200, MSG_PROT_OK, "private"); data_protection_level = CPL_PRIVATE; break; } default: addreply_noformat(534, "Fallback to [C]"); data_protection_level = CPL_CLEAR; break; } #endif } else if (!strcmp(cmd, "auth") || !strcmp(cmd, "adat")) { addreply_noformat(500, MSG_AUTH_UNIMPLEMENTED); } else if (!strcmp(cmd, "type")) { antiidle(); dotype(arg); goto wayout; } else if (!strcmp(cmd, "mode")) { antiidle(); domode(arg); goto wayout; #ifndef MINIMAL } else if (!strcmp(cmd, "feat")) { dofeat(); goto wayout; } else if (!strcmp(cmd, "opts")) { doopts(arg); goto wayout; #endif } else if (!strcmp(cmd, "stru")) { dostru(arg); goto wayout; #ifndef MINIMAL } else if (!strcmp(cmd, "help")) { goto help_site; #endif #ifdef DEBUG } else if (!strcmp(cmd, "xdbg")) { debug++; addreply(200, MSG_XDBG_OK, debug); goto wayout; #endif } else if (loggedin == 0) { /* from this point, all commands need authentication */ addreply_noformat(530, MSG_NOT_LOGGED_IN); goto wayout; } else { if (!strcmp(cmd, "cwd") || !strcmp(cmd, "xcwd")) { antiidle(); docwd(arg); goto wayout; } else if (!strcmp(cmd, "port")) { doport(arg); #ifndef MINIMAL } else if (!strcmp(cmd, "eprt")) { doeprt(arg); } else if (!strcmp(cmd, "esta") && disallow_passive == 0 && STORAGE_FAMILY(force_passive_ip) == 0) { doesta(); } else if (!strcmp(cmd, "estp")) { doestp(); #endif } else if (disallow_passive == 0 && (!strcmp(cmd, "pasv") || !strcmp(cmd, "p@sw"))) { dopasv(0); } else if (disallow_passive == 0 && (!strcmp(cmd, "epsv") && (broken_client_compat == 0 || STORAGE_FAMILY(ctrlconn) == AF_INET6))) { if (!strcasecmp(arg, "all")) { epsv_all = 1; addreply_noformat(220, MSG_ACTIVE_DISABLED); } else if (!strcmp(arg, "2") && !v6ready) { addreply_noformat(522, MSG_ONLY_IPV4); } else { dopasv(1); } #ifndef MINIMAL } else if (disallow_passive == 0 && !strcmp(cmd, "spsv")) { dopasv(2); } else if (!strcmp(cmd, "allo")) { if (*arg == 0) { addreply_noformat(501, MSG_STAT_FAILURE); } else { const off_t size = (off_t) strtoull(arg, NULL, 10); if (size < (off_t) 0) { 
addreply_noformat(501, MSG_STAT_FAILURE); } else { doallo(size); } } #endif } else if (!strcmp(cmd, "pwd") || !strcmp(cmd, "xpwd")) { #ifdef WITH_RFC2640 char *nwd; #endif antiidle(); #ifdef WITH_RFC2640 nwd = charset_fs2client(wd); addreply(257, "\"%s\" " MSG_IS_YOUR_CURRENT_LOCATION, nwd); free(nwd); #else addreply(257, "\"%s\" " MSG_IS_YOUR_CURRENT_LOCATION, wd); #endif goto wayout; } else if (!strcmp(cmd, "cdup") || !strcmp(cmd, "xcup")) { docwd(".."); } else if (!strcmp(cmd, "retr")) { if (*arg != 0) { #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { doretr(arg); } } else { addreply_noformat(501, MSG_NO_FILE_NAME); } } else if (!strcmp(cmd, "rest")) { antiidle(); if (*arg != 0) { dorest(arg); } else { addreply_noformat(501, MSG_NO_RESTART_POINT); restartat = (off_t) 0; } goto wayout; } else if (!strcmp(cmd, "dele")) { if (*arg != 0) { dodele(arg); } else { addreply_noformat(501, MSG_NO_FILE_NAME); } } else if (!strcmp(cmd, "stor")) { arg = revealextraspc(arg); if (*arg != 0) { #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { dostor(arg, 0, autorename); } } else { addreply_noformat(501, MSG_NO_FILE_NAME); } } else if (!strcmp(cmd, "appe")) { arg = revealextraspc(arg); if (*arg != 0) { #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { dostor(arg, 1, 0); } } else { addreply_noformat(501, MSG_NO_FILE_NAME); } #ifndef MINIMAL } else if (!strcmp(cmd, "stou")) { #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { dostou(); } #endif #ifndef DISABLE_MKD_RMD } else if (!strcmp(cmd, "mkd") || !strcmp(cmd, "xmkd")) { arg = revealextraspc(arg); if (*arg != 0) { domkd(arg); } else { addreply_noformat(501, MSG_NO_DIRECTORY_NAME); } } else if (!strcmp(cmd, "rmd") || !strcmp(cmd, "xrmd")) { if (*arg != 0) { dormd(arg); } else { addreply_noformat(550, MSG_NO_DIRECTORY_NAME); } #endif #ifndef MINIMAL } else if (!strcmp(cmd, "stat")) { if (*arg != 0) { modern_listings = 0; donlist(arg, 1, 1, 1, 1); } else { addreply_noformat(211, "http://www.pureftpd.org/"); } #endif } else if (!strcmp(cmd, "list")) { #ifndef MINIMAL modern_listings = 0; #endif #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { donlist(arg, 0, 1, 0, 1); } } else if (!strcmp(cmd, "nlst")) { #ifndef MINIMAL modern_listings = 0; #endif #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { donlist(arg, 0, 0, 0, broken_client_compat); } #ifndef MINIMAL } else if (!strcmp(cmd, "mlst")) { #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { domlst(*arg != 0 ? 
arg : "."); } } else if (!strcmp(cmd, "mlsd")) { modern_listings = 1; #ifdef WITH_TLS if (enforce_tls_auth == 3 && data_protection_level != CPL_PRIVATE) { addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED); } else #endif { donlist(arg, 0, 1, 1, 0); } #endif } else if (!strcmp(cmd, "abor")) { addreply_noformat(226, MSG_ABOR_SUCCESS); #ifndef MINIMAL } else if (!strcmp(cmd, "site")) { if ((sitearg = arg) != NULL) { while (*sitearg != 0 && !isspace((unsigned char) *sitearg)) { sitearg++; } if (*sitearg != 0) { *sitearg++ = 0; } } if (!strcasecmp(arg, "idle")) { if (sitearg == NULL || *sitearg == 0) { addreply_noformat(501, "SITE IDLE: " MSG_MISSING_ARG); } else { unsigned long int i = 0; i = strtoul(sitearg, &sitearg, 10); if (sitearg && *sitearg) addreply(501, MSG_GARBAGE_FOUND " : %s", sitearg); else if (i > MAX_SITE_IDLE) addreply_noformat(501, MSG_VALUE_TOO_LARGE); else { idletime = i; addreply(200, MSG_IDLE_TIME, idletime); idletime_noop = (double) idletime * 2.0; } } } else if (!strcasecmp(arg, "time")) { dositetime(); } else if (!strcasecmp(arg, "help")) { help_site: addreply_noformat(214, MSG_SITE_HELP CRLF # ifdef WITH_DIRALIASES " ALIAS" CRLF # endif " CHMOD" CRLF " IDLE" CRLF " UTIME"); addreply_noformat(214, "Pure-FTPd - http://pureftpd.org/"); } else if (!strcasecmp(arg, "chmod")) { char *sitearg2; mode_t mode; parsechmod: if (sitearg == NULL || *sitearg == 0) { addreply_noformat(501, MSG_MISSING_ARG); goto chmod_wayout; } sitearg2 = sitearg; while (*sitearg2 != 0 && !isspace((unsigned char) *sitearg2)) { sitearg2++; } while (*sitearg2 != 0 && isspace((unsigned char) *sitearg2)) { sitearg2++; } if (*sitearg2 == 0) { addreply_noformat(550, MSG_NO_FILE_NAME); goto chmod_wayout; } mode = (mode_t) strtoul(sitearg, NULL, 8); if (mode > (mode_t) 07777) { addreply_noformat(501, MSG_BAD_CHMOD); goto chmod_wayout; } dochmod(sitearg2, mode); chmod_wayout: (void) 0; } else if (!strcasecmp(arg, "utime")) { char *sitearg2; if (sitearg == NULL || *sitearg == 0) { addreply_noformat(501, MSG_NO_FILE_NAME); goto utime_wayout; } if ((sitearg2 = strrchr(sitearg, ' ')) == NULL || sitearg2 == sitearg) { addreply_noformat(501, MSG_MISSING_ARG); goto utime_wayout; } if (strcasecmp(sitearg2, " UTC") != 0) { addreply_noformat(500, "UTC Only"); goto utime_wayout; } *sitearg2-- = 0; if ((sitearg2 = strrchr(sitearg, ' ')) == NULL || sitearg2 == sitearg) { utime_no_arg: addreply_noformat(501, MSG_MISSING_ARG); goto utime_wayout; } *sitearg2-- = 0; if ((sitearg2 = strrchr(sitearg, ' ')) == NULL || sitearg2 == sitearg) { goto utime_no_arg; } *sitearg2-- = 0; if ((sitearg2 = strrchr(sitearg, ' ')) == NULL || sitearg2 == sitearg) { goto utime_no_arg; } *sitearg2++ = 0; if (*sitearg2 == 0) { goto utime_no_arg; } doutime(sitearg, sitearg2); utime_wayout: (void) 0; # ifdef WITH_DIRALIASES } else if (!strcasecmp(arg, "alias")) { if (sitearg == NULL || *sitearg == 0) { print_aliases(); } else { const char *alias; if ((alias = lookup_alias(sitearg)) != NULL) { addreply(214, MSG_ALIASES_ALIAS, sitearg, alias); } else { addreply(502, MSG_ALIASES_UNKNOWN, sitearg); } } # endif } else if (*arg != 0) { addreply(500, "SITE %s " MSG_UNKNOWN_EXTENSION, arg); } else { addreply_noformat(500, "SITE: " MSG_MISSING_ARG); } #endif } else if (!strcmp(cmd, "mdtm")) { domdtm(arg); } else if (!strcmp(cmd, "size")) { dosize(arg); #ifndef MINIMAL } else if (!strcmp(cmd, "chmod")) { sitearg = arg; goto parsechmod; #endif } else if (!strcmp(cmd, "rnfr")) { if (*arg != 0) { dornfr(arg); } else { addreply_noformat(550, MSG_NO_FILE_NAME); } } 
else if (!strcmp(cmd, "rnto")) { arg = revealextraspc(arg); if (*arg != 0) { dornto(arg); } else { addreply_noformat(550, MSG_NO_FILE_NAME); } } else { addreply_noformat(500, MSG_UNKNOWN_COMMAND); } } noopidle = (time_t) -1; wayout: #ifdef WITH_RFC2640 free(narg); narg = NULL; #endif #ifdef THROTTLING if (throttling_delay != 0UL) { usleep2(throttling_delay); } #else (void) 0; #endif } }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3453_0
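The parser() loop in the pure-ftpd code above lowercases the command verb in place and then treats the first character after it (NUL or whitespace) as the command/argument separator. A rough standalone sketch of that split is shown below; split_command is a hypothetical name, not a function from pure-ftpd, and error handling is reduced to returning NULL.

#include <ctype.h>
#include <stddef.h>

/*
 * Split "CWD /pub/dir" into a lowercased verb ("cwd") and an argument
 * pointer ("/pub/dir").  Returns a pointer to the argument, which may
 * point at the terminating NUL when no argument was given, or NULL if
 * the verb is followed by something other than whitespace.
 */
static char *split_command(char *line)
{
	size_t n = 0;

	/* Lowercase the verb; '@' is allowed, mirroring the original loop. */
	while (isalpha((unsigned char)line[n]) || line[n] == '@') {
		line[n] = (char)tolower((unsigned char)line[n]);
		n++;
	}

	if (line[n] == '\0')
		return line + n;              /* bare command, empty argument */
	if (!isspace((unsigned char)line[n]))
		return NULL;                  /* garbage glued to the verb */

	line[n] = '\0';                       /* terminate the verb */
	return line + n + 1;                  /* argument starts after the space */
}

The original also supports an optional trailing-space trim (SKIP_COMMAND_TRAILING_SPACES) and bounds every index against cmdsize; the sketch assumes a NUL-terminated line for brevity.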
crossvul-cpp_data_bad_3432_4
/* * runtime.c: Runtime functions * * Authors: * Jonathan Pryor * * Copyright 2010 Novell, Inc (http://www.novell.com) */ #include <config.h> #include <glib.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/class.h> #include <mono/metadata/class-internals.h> #include <mono/metadata/runtime.h> static void fire_process_exit_event (MonoDomain *domain, gpointer user_data) { MonoClassField *field; gpointer pa [2]; MonoObject *delegate, *exc; field = mono_class_get_field_from_name (mono_defaults.appdomain_class, "ProcessExit"); g_assert (field); delegate = *(MonoObject **)(((char *)domain->domain) + field->offset); if (delegate == NULL) return; pa [0] = domain; pa [1] = NULL; mono_runtime_delegate_invoke (delegate, pa, &exc); } void mono_runtime_shutdown (void) { mono_domain_foreach (fire_process_exit_event, NULL); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3432_4
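fire_process_exit_event() in the mono runtime.c record above finds the AppDomain ProcessExit delegate by adding the field's byte offset to the domain's static data and dereferencing that slot. A generic sketch of that "pointer stored at a byte offset" pattern, with made-up names and no mono API involved:

#include <stddef.h>

/* Read a pointer-sized slot that lives 'offset' bytes into 'base'. */
static void *read_ptr_at_offset(void *base, size_t offset)
{
	return *(void **)((char *)base + offset);
}

If the slot is NULL, no handler was registered and the event is skipped, which mirrors the early return in the original function.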
crossvul-cpp_data_good_3486_16
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Modified by Cort Dougan and Paul Mackerras. * * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/perf_event.h> #include <linux/magic.h> #include <asm/firmware.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/tlbflush.h> #include <asm/siginfo.h> #include <mm/mmu_decl.h> #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ if (!user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 11)) ret = 1; preempt_enable(); } return ret; } #else static inline int notify_page_fault(struct pt_regs *regs) { return 0; } #endif /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. */ static int store_updates_sp(struct pt_regs *regs) { unsigned int inst; if (get_user(inst, (unsigned int __user *)regs->nip)) return 0; /* check for 1 in the rA field */ if (((inst >> 16) & 0x1f) != 1) return 0; /* check major opcode */ switch (inst >> 26) { case 37: /* stwu */ case 39: /* stbu */ case 45: /* sthu */ case 53: /* stfsu */ case 55: /* stfdu */ return 1; case 62: /* std or stdu */ return (inst & 3) == 1; case 31: /* check minor opcode */ switch ((inst >> 1) & 0x3ff) { case 181: /* stdux */ case 183: /* stwux */ case 247: /* stbux */ case 439: /* sthux */ case 695: /* stfsux */ case 759: /* stfdux */ return 1; } } return 0; } /* * For 600- and 800-family processors, the error_code parameter is DSISR * for a data fault, SRR1 for an instruction fault. For 400-family processors * the error_code parameter is ESR for a data fault, 0 for an instruction * fault. * For 64-bit processors, the error_code parameter is * - DSISR for a non-SLB data access fault, * - SRR1 & 0x08000000 for a non-SLB instruction access fault * - 0 any SLB fault. * * The return value is 0 if the fault was handled, or the signal * number if this is a kernel fault that can't be handled here. */ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; siginfo_t info; int code = SEGV_MAPERR; int is_write = 0, ret; int trap = TRAP(regs); int is_exec = trap == 0x400; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* * Fortunately the bit assignments in SRR1 for an instruction * fault and DSISR for a data fault are mostly the same for the * bits we are interested in. But there are some bits which * indicate errors in DSISR but can validly be set in SRR1. 
*/ if (trap == 0x400) error_code &= 0x48200000; else is_write = error_code & DSISR_ISSTORE; #else is_write = error_code & ESR_DST; #endif /* CONFIG_4xx || CONFIG_BOOKE */ if (notify_page_fault(regs)) return 0; if (unlikely(debugger_fault_handler(regs))) return 0; /* On a kernel SLB miss we can only check for a valid exception entry */ if (!user_mode(regs) && (address >= TASK_SIZE)) return SIGSEGV; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ defined(CONFIG_PPC_BOOK3S_64)) if (error_code & DSISR_DABRMATCH) { /* DABR match */ do_dabr(regs, address, error_code); return 0; } #endif if (in_atomic() || mm == NULL) { if (!user_mode(regs)) return SIGSEGV; /* in_atomic() in user mode is really bad, as is current->mm == NULL. */ printk(KERN_EMERG "Page fault in user mode with " "in_atomic() = %d mm = %p\n", in_atomic(), mm); printk(KERN_EMERG "NIP = %lx MSR = %lx\n", regs->nip, regs->msr); die("Weird page fault", regs, SIGSEGV); } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the * kernel and should generate an OOPS. Unfortunately, in the case of an * erroneous fault occurring in a code path which already holds mmap_sem * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user * space from well defined areas of code, which are listed in the * exceptions table. * * As the vast majority of faults will be valid we will only perform * the source reference check when there is a possibility of a deadlock. * Attempt to lock the address space, if we cannot we then validate the * source. If this is invalid we can skip the address space check, * thus avoiding the deadlock. */ if (!down_read_trylock(&mm->mmap_sem)) { if (!user_mode(regs) && !search_exception_tables(regs->nip)) goto bad_area_nosemaphore; down_read(&mm->mmap_sem); } vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. * The kernel signal delivery code writes up to about 1.5kB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to * expand to 1MB without further checks. */ if (address + 0x100000 < vma->vm_end) { /* get user regs even if this fault is in kernel mode */ struct pt_regs *uregs = current->thread.regs; if (uregs == NULL) goto bad_area; /* * A user-mode access to an address a long way below * the stack pointer is only valid if the instruction * is one which would update the stack pointer to the * address accessed if the instruction completed, * i.e. either stwu rs,n(r1) or stwux rs,r1,rb * (or the byte, halfword, float or double forms). * * If we don't check this then any write to the area * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ if (address + 2048 < uregs->gpr[1] && (!user_mode(regs) || !store_updates_sp(regs))) goto bad_area; } if (expand_stack(vma, address)) goto bad_area; good_area: code = SEGV_ACCERR; #if defined(CONFIG_6xx) if (error_code & 0x95700000) /* an error such as lwarx to I/O controller space, address matching DABR, eciwx, etc. 
*/ goto bad_area; #endif /* CONFIG_6xx */ #if defined(CONFIG_8xx) /* 8xx sometimes need to load a invalid/non-present TLBs. * These must be invalidated separately as linux mm don't. */ if (error_code & 0x40000000) /* no translation? */ _tlbil_va(address, 0, 0, 0); /* The MPC8xx seems to always set 0x80000000, which is * "undefined". Of those that can be set, this is the only * one which seems bad. */ if (error_code & 0x10000000) /* Guarded storage error. */ goto bad_area; #endif /* CONFIG_8xx */ if (is_exec) { #ifdef CONFIG_PPC_STD_MMU /* Protection fault on exec go straight to failure on * Hash based MMUs as they either don't support per-page * execute permission, or if they do, it's handled already * at the hash level. This test would probably have to * be removed if we change the way this works to make hash * processors use the same I/D cache coherency mechanism * as embedded. */ if (error_code & DSISR_PROTFAULT) goto bad_area; #endif /* CONFIG_PPC_STD_MMU */ /* * Allow execution from readable areas if the MMU does not * provide separate controls over reading and executing. * * Note: That code used to not be enabled for 4xx/BookE. * It is now as I/D cache coherency for these is done at * set_pte_at() time and I see no reason why the test * below wouldn't be valid on those processors. This -may- * break programs compiled with a really old ABI though. */ if (!(vma->vm_flags & VM_EXEC) && (cpu_has_feature(CPU_FTR_NOEXECUTE) || !(vma->vm_flags & (VM_READ | VM_WRITE)))) goto bad_area; /* a write */ } else if (is_write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; /* a read */ } else { /* protection fault */ if (error_code & 0x08000000) goto bad_area; if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) goto bad_area; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(ret & VM_FAULT_ERROR)) { if (ret & VM_FAULT_OOM) goto out_of_memory; else if (ret & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { preempt_disable(); get_lppaca()->page_ins += (1 << PAGE_FACTOR); preempt_enable(); } #endif } else { current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); return 0; bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { _exception(SIGSEGV, regs, code, address); return 0; } if (is_exec && (error_code & DSISR_PROTFAULT) && printk_ratelimit()) printk(KERN_CRIT "kernel tried to execute NX-protected" " page (%lx) - exploit attempt? (uid: %d)\n", address, current_uid()); return SIGSEGV; /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) return SIGKILL; pagefault_out_of_memory(); return 0; do_sigbus: up_read(&mm->mmap_sem); if (user_mode(regs)) { info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *)address; force_sig_info(SIGBUS, &info, current); return 0; } return SIGBUS; } /* * bad_page_fault is called when we have a bad access from the kernel. * It is called from the DSI and ISI handlers in head.S and from some * of the procedures in traps.c. 
*/ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { const struct exception_table_entry *entry; unsigned long *stackend; /* Are we prepared to handle this fault? */ if ((entry = search_exception_tables(regs->nip)) != NULL) { regs->nip = entry->fixup; return; } /* kernel has accessed a bad area */ switch (regs->trap) { case 0x300: case 0x380: printk(KERN_ALERT "Unable to handle kernel paging request for " "data at address 0x%08lx\n", regs->dar); break; case 0x400: case 0x480: printk(KERN_ALERT "Unable to handle kernel paging request for " "instruction fetch\n"); break; default: printk(KERN_ALERT "Unable to handle kernel paging request for " "unknown fault\n"); break; } printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", regs->nip); stackend = end_of_stack(current); if (current != &init_task && *stackend != STACK_END_MAGIC) printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); die("Kernel access of bad area", regs, sig); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_16
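store_updates_sp() in the powerpc fault.c record above decides whether a faulting access far below the stack pointer is legitimate by checking that the instruction at NIP is a store-with-update through r1. A reduced sketch of the field extraction it relies on (primary opcode in the top 6 bits, rA in bits 16..20), covering only the stwu case, follows; the function name is illustrative.

#include <stdint.h>

/* Return 1 if 'inst' is stwu rS,d(r1): primary opcode 37 with rA == 1. */
static int is_stwu_through_r1(uint32_t inst)
{
	uint32_t major = inst >> 26;          /* primary opcode field */
	uint32_t ra    = (inst >> 16) & 0x1f; /* rA register field    */

	return major == 37 && ra == 1;
}

The real function additionally accepts stbu/sthu/stfsu/stfdu/stdu and the indexed X-form variants (opcode 31 with the minor opcode in bits 1..10), but the bit positions are the same.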
crossvul-cpp_data_bad_2393_0
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.115 2014/12/16 20:53:05 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, uint16_t *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int *, uint16_t *); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int, int *, uint16_t *); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *, uint16_t *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 128 #define MAX_SHNUM 32768 #define SIZE_UNKNOWN ((off_t)-1) private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define 
elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x001 #define FLAGS_DID_OS_NOTE 0x002 #define FLAGS_DID_BUILD_ID 0x004 #define FLAGS_DID_CORE_STYLE 0x008 #define FLAGS_DID_NETBSD_PAX 0x010 #define FLAGS_DID_NETBSD_MARCH 0x020 #define FLAGS_DID_NETBSD_CMODEL 0x040 #define FLAGS_DID_NETBSD_UNKNOWN 0x080 #define FLAGS_IS_CORE 0x100 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags, notecount); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. */ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private int do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; *flags |= FLAGS_DID_BUILD_ID; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? 
"md5/uuid" : "sha1") == -1) return 1; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return 1; return 1; } return 0; } private int do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && type == NT_GNU_VERSION && descsz == 2) { *flags |= FLAGS_DID_OS_NOTE; file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); return 1; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for GNU/") == -1) return 1; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return 1; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return 1; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return 1; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return 1; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return 1; break; default: if (file_printf(ms, "<unknown>") == -1) return 1; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return 1; return 1; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { if (type == NT_NETBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_netbsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (type == NT_FREEBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_freebsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && type == NT_OPENBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for OpenBSD") == -1) return 1; /* Content of note is always 0 */ return 1; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for DragonFly") == -1) return 1; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return 1; return 1; } return 0; } private int do_pax_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; *flags |= FLAGS_DID_NETBSD_PAX; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return 1; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? "," : "", pax[i]) == -1) return 1; } return 1; } return 0; } private int do_core_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags, size_t size, int clazz) { #ifdef ELFCORE int os_style = -1; /* * Sigh. 
The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return 1; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (type == NT_NETBSD_CORE_PROCINFO) { char sbuf[512]; uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", file_printable(sbuf, sizeof(sbuf), (const char *)&nbuf[doff + 0x7c])) == -1) return 1; /* * Extract the signal number. It is at * offset 0x08. */ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; } break; default: if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. */ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. 
*/ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; tryanother: ; } } break; } #endif return 0; } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags, uint16_t *notecount) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (*notecount == 0) return 0; --*notecount; if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. */ return (offset >= size) ? offset : size; } if ((*flags & FLAGS_DID_OS_NOTE) == 0) { if (do_os_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return size; } if ((*flags & FLAGS_DID_BUILD_ID) == 0) { if (do_bid_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return size; } if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) { if (do_pax_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return size; } if ((*flags & FLAGS_DID_CORE) == 0) { if (do_core_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags, size, clazz)) return size; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: return size; case NT_NETBSD_MARCH: if (*flags & FLAGS_DID_NETBSD_MARCH) return size; if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (*flags & FLAGS_DID_NETBSD_CMODEL) return size; if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (*flags & FLAGS_DID_NETBSD_UNKNOWN) return size; if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { 
AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int mach, int strtab, int *flags, uint16_t *notecount) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; size_t nbadcap = 0; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; ssize_t namesize; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. */ if ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) { file_badread(ms); return -1; } name[namesize] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags, notecount); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (nbadcap > 5) break; if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; if (nbadcap++ > 2) coff = xsh_size; break; } } /*FALLTHROUGH*/ 
skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? ", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int sh_num, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *interp = ""; unsigned char nbuf[BUFSIZ]; char ibuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; bufsize = 0; align = 4; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_NOTE: if (sh_num) /* Did this through section headers */ continue; if (((align = xph_align) & 0x80000000UL) != 0 || align < 4) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } /*FALLTHROUGH*/ case PT_INTERP: len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } break; default: if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_INTERP: if (bufsize && nbuf[0]) { nbuf[bufsize - 1] = '\0'; interp = (const char *)nbuf; } else interp = "*empty*"; break; case PT_NOTE: /* * This is a PT_NOTE section; loop through all the notes * in the section. 
*/ offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags, notecount); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked", linking_style) == -1) return -1; if (interp[0]) if (file_printf(ms, ", interpreter %s", file_printable(ibuf, sizeof(ibuf), interp)) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum, notecount; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } if (S_ISREG(st.st_mode) || st.st_size != 0) fsize = st.st_size; else fsize = SIZE_UNKNOWN; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2393_0
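
The record above documents, in dophn_exec()'s comment, how file(1) decides linkage: it walks the ELF program headers and treats the presence of a PT_INTERP entry as "dynamically linked", otherwise "statically linked". As an editorial sketch only (not part of the CrossVul record or of file(1) itself), the following self-contained userspace program illustrates that check with the standard <elf.h> definitions; it assumes a native-endian ELFCLASS64 file and skips the ELFCLASS32 and byte-swapping handling the real code performs.

/* Minimal PT_INTERP probe: "dynamically" vs "statically" linked. */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	int fd, i, dynamic = 0;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) == -1)
		return 1;
	/* Validate the ELF magic and class before trusting any header field. */
	if (pread(fd, &eh, sizeof(eh), 0) != (ssize_t)sizeof(eh) ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64) {
		close(fd);
		return 1;
	}
	/* Walk the program headers; PT_INTERP marks a dynamically linked image. */
	for (i = 0; i < eh.e_phnum; i++) {
		off_t off = (off_t)eh.e_phoff + (off_t)i * eh.e_phentsize;

		if (pread(fd, &ph, sizeof(ph), off) != (ssize_t)sizeof(ph))
			break;
		if (ph.p_type == PT_INTERP)
			dynamic = 1;
	}
	printf("%s linked\n", dynamic ? "dynamically" : "statically");
	close(fd);
	return 0;
}
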
crossvul-cpp_data_bad_3471_0
/* * linux/fs/lockd/clntproc.c * * RPC procedures for the client side NLM implementation * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/nfs_fs.h> #include <linux/utsname.h> #include <linux/freezer.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #define NLMCLNT_GRACE_WAIT (5*HZ) #define NLMCLNT_POLL_TIMEOUT (30*HZ) #define NLMCLNT_MAX_RETRIES 3 static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); static int nlm_stat_to_errno(__be32 stat); static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); static const struct rpc_call_ops nlmclnt_unlock_ops; static const struct rpc_call_ops nlmclnt_cancel_ops; /* * Cookie counter for NLM requests */ static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); void nlmclnt_next_cookie(struct nlm_cookie *c) { u32 cookie = atomic_inc_return(&nlm_cookie); memcpy(c->data, &cookie, 4); c->len=4; } static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) { atomic_inc(&lockowner->count); return lockowner; } static void nlm_put_lockowner(struct nlm_lockowner *lockowner) { if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) return; list_del(&lockowner->list); spin_unlock(&lockowner->host->h_lock); nlmclnt_release_host(lockowner->host); kfree(lockowner); } static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->pid == pid) return -EBUSY; } return 0; } static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) { uint32_t res; do { res = host->h_pidcount++; } while (nlm_pidbusy(host, res) < 0); return res; } static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->owner != owner) continue; return nlm_get_lockowner(lockowner); } return NULL; } static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *res, *new = NULL; spin_lock(&host->h_lock); res = __nlm_find_lockowner(host, owner); if (res == NULL) { spin_unlock(&host->h_lock); new = kmalloc(sizeof(*new), GFP_KERNEL); spin_lock(&host->h_lock); res = __nlm_find_lockowner(host, owner); if (res == NULL && new != NULL) { res = new; atomic_set(&new->count, 1); new->owner = owner; new->pid = __nlm_alloc_pid(host); new->host = nlm_get_host(host); list_add(&new->list, &host->h_lockowners); new = NULL; } } spin_unlock(&host->h_lock); kfree(new); return res; } /* * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls */ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_args *argp = &req->a_args; struct nlm_lock *lock = &argp->lock; nlmclnt_next_cookie(&argp->cookie); memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh)); lock->caller = utsname()->nodename; lock->oh.data = req->a_owner; lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", (unsigned int)fl->fl_u.nfs_fl.owner->pid, utsname()->nodename); lock->svid = 
fl->fl_u.nfs_fl.owner->pid; lock->fl.fl_start = fl->fl_start; lock->fl.fl_end = fl->fl_end; lock->fl.fl_type = fl->fl_type; } static void nlmclnt_release_lockargs(struct nlm_rqst *req) { BUG_ON(req->a_args.lock.fl.fl_ops != NULL); } /** * nlmclnt_proc - Perform a single client-side lock request * @host: address of a valid nlm_host context representing the NLM server * @cmd: fcntl-style file lock operation to perform * @fl: address of arguments for the lock operation * */ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) { struct nlm_rqst *call; int status; nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; nlmclnt_locks_init_private(fl, host); /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { if (fl->fl_type != F_UNLCK) { call->a_args.block = IS_SETLKW(cmd) ? 1 : 0; status = nlmclnt_lock(call, fl); } else status = nlmclnt_unlock(call, fl); } else if (IS_GETLK(cmd)) status = nlmclnt_test(call, fl); else status = -EINVAL; fl->fl_ops->fl_release_private(fl); fl->fl_ops = NULL; dprintk("lockd: clnt proc returns %d\n", status); return status; } EXPORT_SYMBOL_GPL(nlmclnt_proc); /* * Allocate an NLM RPC call struct * * Note: the caller must hold a reference to host. In case of failure, * this reference will be released. */ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { struct nlm_rqst *call; for(;;) { call = kzalloc(sizeof(*call), GFP_KERNEL); if (call != NULL) { atomic_set(&call->a_count, 1); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); call->a_host = host; return call; } if (signalled()) break; printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } nlmclnt_release_host(host); return NULL; } void nlmclnt_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; nlmclnt_release_host(call->a_host); nlmclnt_release_lockargs(call); kfree(call); } static void nlmclnt_rpc_release(void *data) { nlmclnt_release_call(data); } static int nlm_wait_on_grace(wait_queue_head_t *queue) { DEFINE_WAIT(wait); int status = -EINTR; prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE); if (!signalled ()) { schedule_timeout(NLMCLNT_GRACE_WAIT); try_to_freeze(); if (!signalled ()) status = 0; } finish_wait(queue, &wait); return status; } /* * Generic NLM call */ static int nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct nlm_args *argp = &req->a_args; struct nlm_res *resp = &req->a_res; struct rpc_message msg = { .rpc_argp = argp, .rpc_resp = resp, .rpc_cred = cred, }; int status; dprintk("lockd: call procedure %d on %s\n", (int)proc, host->h_name); do { if (host->h_reclaiming && !argp->reclaim) goto in_grace_period; /* If we have no RPC client yet, create one. */ if ((clnt = nlm_bind_host(host)) == NULL) return -ENOLCK; msg.rpc_proc = &clnt->cl_procinfo[proc]; /* Perform the RPC call. If an error occurs, try again */ if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) { dprintk("lockd: rpc_call returned error %d\n", -status); switch (status) { case -EPROTONOSUPPORT: status = -EINVAL; break; case -ECONNREFUSED: case -ETIMEDOUT: case -ENOTCONN: nlm_rebind_host(host); status = -EAGAIN; break; case -ERESTARTSYS: return signalled () ? 
-EINTR : status; default: break; } break; } else if (resp->status == nlm_lck_denied_grace_period) { dprintk("lockd: server in grace period\n"); if (argp->reclaim) { printk(KERN_WARNING "lockd: spurious grace period reject?!\n"); return -ENOLCK; } } else { if (!argp->reclaim) { /* We appear to be out of the grace period */ wake_up_all(&host->h_gracewait); } dprintk("lockd: server returns status %d\n", resp->status); return 0; /* Okay, call complete */ } in_grace_period: /* * The server has rebooted and appears to be in the grace * period during which locks are only allowed to be * reclaimed. * We can only back off and try again later. */ status = nlm_wait_on_grace(&host->h_gracewait); } while (status == 0); return status; } /* * Generic NLM call, async version. */ static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct rpc_task_setup task_setup_data = { .rpc_message = msg, .callback_ops = tk_ops, .callback_data = req, .flags = RPC_TASK_ASYNC, }; dprintk("lockd: call procedure %d on %s (async)\n", (int)proc, host->h_name); /* If we have no RPC client yet, create one. */ clnt = nlm_bind_host(host); if (clnt == NULL) goto out_err; msg->rpc_proc = &clnt->cl_procinfo[proc]; task_setup_data.rpc_client = clnt; /* bootstrap and kick off the async RPC call */ return rpc_run_task(&task_setup_data); out_err: tk_ops->rpc_release(req); return ERR_PTR(-ENOLCK); } static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct rpc_task *task; task = __nlm_async_call(req, proc, msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } /* * NLM asynchronous call. */ int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } /* * NLM client asynchronous call. * * Note that although the calls are asynchronous, and are therefore * guaranteed to complete, we still always attempt to wait for * completion in order to be able to correctly track the lock * state. */ static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, .rpc_cred = cred, }; struct rpc_task *task; int err; task = __nlm_async_call(req, proc, &msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); err = rpc_wait_for_completion_task(task); rpc_put_task(task); return err; } /* * TEST for the presence of a conflicting lock */ static int nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) { int status; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); if (status < 0) goto out; switch (req->a_res.status) { case nlm_granted: fl->fl_type = F_UNLCK; break; case nlm_lck_denied: /* * Report the conflicting lock back to the application. 
*/ fl->fl_start = req->a_res.lock.fl.fl_start; fl->fl_end = req->a_res.lock.fl.fl_end; fl->fl_type = req->a_res.lock.fl.fl_type; fl->fl_pid = 0; break; default: status = nlm_stat_to_errno(req->a_res.status); } out: nlmclnt_release_call(req); return status; } static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) { spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); } static void nlmclnt_locks_release_private(struct file_lock *fl) { spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); list_del(&fl->fl_u.nfs_fl.list); spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlm_put_lockowner(fl->fl_u.nfs_fl.owner); } static const struct file_lock_operations nlmclnt_lock_ops = { .fl_copy_lock = nlmclnt_locks_copy_lock, .fl_release_private = nlmclnt_locks_release_private, }; static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) { BUG_ON(fl->fl_ops != NULL); fl->fl_u.nfs_fl.state = 0; fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl->fl_ops = &nlmclnt_lock_ops; } static int do_vfs_lock(struct file_lock *fl) { int res = 0; switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { case FL_POSIX: res = posix_lock_file_wait(fl->fl_file, fl); break; case FL_FLOCK: res = flock_lock_file_wait(fl->fl_file, fl); break; default: BUG(); } return res; } /* * LOCK: Try to create a lock * * Programmer Harassment Alert * * When given a blocking lock request in a sync RPC call, the HPUX lockd * will faithfully return LCK_BLOCKED but never cares to notify us when * the lock could be granted. This way, our local process could hang * around forever waiting for the callback. * * Solution A: Implement busy-waiting * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES}) * * For now I am implementing solution A, because I hate the idea of * re-implementing lockd for a third time in two months. The async * calls shouldn't be too hard to do, however. * * This is one of the lovely things about standards in the NFS area: * they're so soft and squishy you can't really blame HP for doing this. */ static int nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) { struct rpc_cred *cred = nfs_file_cred(fl->fl_file); struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; struct nlm_wait *block = NULL; unsigned char fl_flags = fl->fl_flags; unsigned char fl_type; int status = -ENOLCK; if (nsm_monitor(host) < 0) goto out; req->a_args.state = nsm_local_state; fl->fl_flags |= FL_ACCESS; status = do_vfs_lock(fl); fl->fl_flags = fl_flags; if (status < 0) goto out; block = nlmclnt_prepare_block(host, fl); again: /* * Initialise resp->status to a valid non-zero value, * since 0 == nlm_lck_granted */ resp->status = nlm_lck_blocked; for(;;) { /* Reboot protection */ fl->fl_u.nfs_fl.state = host->h_state; status = nlmclnt_call(cred, req, NLMPROC_LOCK); if (status < 0) break; /* Did a reclaimer thread notify us of a server reboot? 
*/ if (resp->status == nlm_lck_denied_grace_period) continue; if (resp->status != nlm_lck_blocked) break; /* Wait on an NLM blocking lock */ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); if (status < 0) break; if (resp->status != nlm_lck_blocked) break; } /* if we were interrupted while blocking, then cancel the lock request * and exit */ if (resp->status == nlm_lck_blocked) { if (!req->a_args.block) goto out_unlock; if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) goto out_unblock; } if (resp->status == nlm_granted) { down_read(&host->h_rwsem); /* Check whether or not the server has rebooted */ if (fl->fl_u.nfs_fl.state != host->h_state) { up_read(&host->h_rwsem); goto again; } /* Ensure the resulting lock will get added to granted list */ fl->fl_flags |= FL_SLEEP; if (do_vfs_lock(fl) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; status = 0; } if (status < 0) goto out_unlock; /* * EAGAIN doesn't make sense for sleeping locks, and in some * cases NLM_LCK_DENIED is returned for a permanent error. So * turn it into an ENOLCK. */ if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP)) status = -ENOLCK; else status = nlm_stat_to_errno(resp->status); out_unblock: nlmclnt_finish_block(block); out: nlmclnt_release_call(req); return status; out_unlock: /* Fatal error: ensure that we remove the lock altogether */ dprintk("lockd: lock attempt ended in fatal error.\n" " Attempting to unlock.\n"); nlmclnt_finish_block(block); fl_type = fl->fl_type; fl->fl_type = F_UNLCK; down_read(&host->h_rwsem); do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_type = fl_type; fl->fl_flags = fl_flags; nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); return status; } /* * RECLAIM: Try to reclaim a lock */ int nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl) { struct nlm_rqst reqst, *req; int status; req = &reqst; memset(req, 0, sizeof(*req)); locks_init_lock(&req->a_args.lock.fl); locks_init_lock(&req->a_res.lock.fl); req->a_host = host; req->a_flags = 0; /* Set up the argument struct */ nlmclnt_setlockargs(req, fl); req->a_args.reclaim = 1; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); if (status >= 0 && req->a_res.status == nlm_granted) return 0; printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " "(errno %d, status %d)\n", fl->fl_pid, status, ntohl(req->a_res.status)); /* * FIXME: This is a serious failure. We can * * a. Ignore the problem * b. Send the owning process some signal (Linux doesn't have * SIGLOST, though...) * c. Retry the operation * * Until someone comes up with a simple implementation * for b or c, I'll choose option a. */ return -ENOLCK; } /* * UNLOCK: remove an existing lock */ static int nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; int status; unsigned char fl_flags = fl->fl_flags; /* * Note: the server is supposed to either grant us the unlock * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either * case, we want to unlock. 
*/ fl->fl_flags |= FL_EXISTS; down_read(&host->h_rwsem); status = do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; if (status == -ENOENT) { status = 0; goto out; } atomic_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); if (status < 0) goto out; if (resp->status == nlm_granted) goto out; if (resp->status != nlm_lck_denied_nolocks) printk("lockd: unexpected unlock status: %d\n", resp->status); /* What to do now? I'm out of my depth... */ status = -ENOLCK; out: nlmclnt_release_call(req); return status; } static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_ASSASSINATED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); goto retry_rebind; } if (status == NLM_LCK_DENIED_GRACE_PERIOD) { rpc_delay(task, NLMCLNT_GRACE_WAIT); goto retry_unlock; } if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); die: return; retry_rebind: nlm_rebind_host(req->a_host); retry_unlock: rpc_restart_call(task); } static const struct rpc_call_ops nlmclnt_unlock_ops = { .rpc_call_done = nlmclnt_unlock_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Cancel a blocked lock request. * We always use an async RPC call for this in order not to hang a * process that has been Ctrl-C'ed. */ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) { struct nlm_rqst *req; int status; dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" " Attempting to cancel lock.\n"); req = nlm_alloc_call(nlm_get_host(host)); if (!req) return -ENOMEM; req->a_flags = RPC_TASK_ASYNC; nlmclnt_setlockargs(req, fl); req->a_args.block = block; atomic_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); if (status == 0 && req->a_res.status == nlm_lck_denied) status = -ENOLCK; nlmclnt_release_call(req); return status; } static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_ASSASSINATED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: CANCEL call error %d, retrying.\n", task->tk_status); goto retry_cancel; } dprintk("lockd: cancel status %u (task %u)\n", status, task->tk_pid); switch (status) { case NLM_LCK_GRANTED: case NLM_LCK_DENIED_GRACE_PERIOD: case NLM_LCK_DENIED: /* Everything's good */ break; case NLM_LCK_DENIED_NOLOCKS: dprintk("lockd: CANCEL failed (server has no locks)\n"); goto retry_cancel; default: printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", status); } die: return; retry_cancel: /* Don't ever retry more than 3 times */ if (req->a_retries++ >= NLMCLNT_MAX_RETRIES) goto die; nlm_rebind_host(req->a_host); rpc_restart_call(task); rpc_delay(task, 30 * HZ); } static const struct rpc_call_ops nlmclnt_cancel_ops = { .rpc_call_done = nlmclnt_cancel_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Convert an NLM status code to a generic kernel errno */ static int nlm_stat_to_errno(__be32 status) { switch(ntohl(status)) { case NLM_LCK_GRANTED: return 0; case NLM_LCK_DENIED: return -EAGAIN; case NLM_LCK_DENIED_NOLOCKS: case NLM_LCK_DENIED_GRACE_PERIOD: return -ENOLCK; case NLM_LCK_BLOCKED: printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n"); return -ENOLCK; #ifdef CONFIG_LOCKD_V4 case NLM_DEADLCK: return -EDEADLK; case 
NLM_ROFS: return -EROFS; case NLM_STALE_FH: return -ESTALE; case NLM_FBIG: return -EOVERFLOW; case NLM_FAILED: return -ENOLCK; #endif } printk(KERN_NOTICE "lockd: unexpected server status %d\n", status); return -ENOLCK; }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3471_0
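
The clntproc.c record above services fcntl-style F_GETLK/F_SETLK/F_SETLKW requests against a remote lock manager, and its comments describe retrying a blocking lock that may be interrupted by a signal. The sketch below is an editorial userspace illustration only (it is not kernel code and not part of the record): it shows the equivalent local fcntl operations, with the EINTR retry loop playing the role of the nlm_lck_blocked loop in nlmclnt_lock(). The file path argument is whatever the caller supplies; nothing here touches NFS.

/* Userspace analogue of the lock requests the NLM client handles. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct flock fl;
	int fd, ret;

	if (argc != 2 || (fd = open(argv[1], O_RDWR)) == -1)
		return 1;

	/* F_GETLK: report a conflicting lock, as nlmclnt_test() does. */
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;   /* l_start = 0, l_len = 0: whole file */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("conflicting lock held by pid %ld\n", (long)fl.l_pid);

	/* F_SETLKW: block until granted, retrying if a signal interrupts
	 * the wait (cf. the blocking-lock retry loop in nlmclnt_lock()). */
	fl.l_type = F_WRLCK;
	do {
		ret = fcntl(fd, F_SETLKW, &fl);
	} while (ret == -1 && errno == EINTR);
	if (ret == 0) {
		fl.l_type = F_UNLCK;             /* release, cf. nlmclnt_unlock() */
		(void) fcntl(fd, F_SETLK, &fl);
	}
	close(fd);
	return 0;
}
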
crossvul-cpp_data_bad_946_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO GGGGG RRRR IIIII FFFFF Y Y % % MM MM O O G R R I F Y Y % % M M M O O G GGG RRRR I FFF Y % % M M O O G G R R I F Y % % M M OOO GGGG R R IIIII F Y % % % % % % MagickWand Module Methods % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use the mogrify program to resize an image, blur, crop, despeckle, dither, % draw on, flip, join, re-sample, and much more. This tool is similiar to % convert except that the original image file is overwritten (unless you % change the file suffix with the -format option) with any changes you % request. % */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/mogrify-private.h" #include "MagickCore/blob-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/image-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_HAVE_UTIME_H) #include <utime.h> #endif /* Constant declaration. */ static const char MogrifyAlphaColor[] = "#bdbdbd", /* gray */ MogrifyBackgroundColor[] = "#ffffff", /* white */ MogrifyBorderColor[] = "#dfdfdf"; /* gray */ /* Define declarations. */ #define UndefinedCompressionQuality 0UL /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o m m a n d G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickCommandGenesis() applies image processing options to an image as % prescribed by command line options. % % It wiil look for special options like "-debug", "-bench", and % "-distribute-cache" that needs to be applied even before the main % processing begins, and may completely overrule normal command processing. % Such 'Genesis' Options can only be given on the CLI, (not in a script) % and are typically ignored (as they have been handled) if seen later. % % The format of the MagickCommandGenesis method is: % % MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, % MagickCommand command,int argc,char **argv,char **metadata, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o command: Choose from ConvertImageCommand, IdentifyImageCommand, % MogrifyImageCommand, CompositeImageCommand, CompareImagesCommand, % ConjureImageCommand, StreamImageCommand, ImportImageCommand, % DisplayImageCommand, or AnimateImageCommand. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. 
% % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, MagickCommand command,int argc,char **argv,char **metadata, ExceptionInfo *exception) { char client_name[MaxTextExtent], *option; double duration, serial; MagickBooleanType concurrent, regard_warnings, status; register ssize_t i; size_t iterations, number_threads; ssize_t n; (void) setlocale(LC_ALL,""); (void) setlocale(LC_NUMERIC,"C"); GetPathComponent(argv[0],TailPath,client_name); (void) SetClientName(client_name); concurrent=MagickFalse; duration=(-1.0); iterations=1; status=MagickTrue; regard_warnings=MagickFalse; for (i=1; i < (ssize_t) (argc-1); i++) { option=argv[i]; if ((strlen(option) == 1) || ((*option != '-') && (*option != '+'))) continue; if (LocaleCompare("-bench",option) == 0) iterations=StringToUnsignedLong(argv[++i]); if (LocaleCompare("-concurrent",option) == 0) concurrent=MagickTrue; if (LocaleCompare("-debug",option) == 0) (void) SetLogEventMask(argv[++i]); if (LocaleCompare("-distribute-cache",option) == 0) { DistributePixelCacheServer(StringToInteger(argv[++i]),exception); exit(0); } if (LocaleCompare("-duration",option) == 0) duration=StringToDouble(argv[++i],(char **) NULL); if (LocaleCompare("-regard-warnings",option) == 0) regard_warnings=MagickTrue; } if (iterations == 1) { char *text; text=(char *) NULL; status=command(image_info,argc,argv,&text,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); text=DestroyString(text); } return(status); } number_threads=GetOpenMPMaximumThreads(); serial=0.0; for (n=1; n <= (ssize_t) number_threads; n++) { double e, parallel, user_time; TimerInfo *timer; (void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n); timer=AcquireTimerInfo(); if (concurrent == MagickFalse) { for (i=0; i < (ssize_t) iterations; i++) { char *text; text=(char *) NULL; if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,&text,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); text=DestroyString(text); } } } else { SetOpenMPNested(1); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp parallel for shared(status) #endif for (i=0; i < (ssize_t) iterations; i++) { char *text; text=(char *) NULL; if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,&text,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_MagickCommandGenesis) #endif { if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); 
text=DestroyString(text); } } } } user_time=GetUserTime(timer); parallel=GetElapsedTime(timer); e=1.0; if (n == 1) serial=parallel; else e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/ (double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n); (void) FormatLocaleFile(stderr, " Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n", (double) n,(double) iterations,(double) iterations/parallel,e,user_time, (unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel, 60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5)); timer=DestroyTimerInfo(timer); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImage() applies simple single image processing options to a single % image that may be part of a large list, but also handles any 'region' % image handling. % % The image in the list may be modified in three different ways... % % * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), % * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) % * replace by a list of images (only the -separate option!) % % In each case the result is returned into the list, and a pointer to the % modified image (last image added if replaced by a list of images) is % returned. % % ASIDE: The -crop is present but restricted to non-tile single image crops % % This means if all the images are being processed (such as by % MogrifyImages(), next image to be processed will be as per the pointer % (*image)->next. Also the image list may grow as a result of some specific % operations but as images are never merged or deleted, it will never shrink % in length. Typically the list will remain the same length. % % WARNING: As the image pointed to may be replaced, the first image in the % list may also change. GetFirstImageInList() should be used by caller if % they wish return the Image pointer to the first image in list. % % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, % const char **argv,Image **image) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MagickPathExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; /* Read an image into a image cache (for repeated usage) if not already in cache. Then return the image that is in the cache. 
*/ (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); (void) CopyMagickString(read_info->filename,path,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } static inline MagickBooleanType IsPathWritable(const char *path) { if (IsPathAccessible(path) == MagickFalse) return(MagickFalse); if (access_utf8(path,W_OK) != 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MagickPathExtent], tag[MagickPathExtent]; const char *locale_message; register char *p; magick_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MagickPathExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } static Image *SparseColorOption(const Image *image, const SparseColorMethod method,const char *arguments, const MagickBooleanType color_from_image,ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p; double *sparse_arguments; Image *sparse_image; PixelInfo color; MagickBooleanType error; register size_t x; size_t number_arguments, number_colors; /* SparseColorOption() parses the complex -sparse-color argument into an an array of floating point values then calls SparseColorImage(). Argument is a complex mix of floating-point pixel coodinates, and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to image - and add up number of color channel. 
*/ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) { if ( color_from_image ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color arg given, when colors are coming from image"); return( (Image *) NULL); } x += number_colors; /* color argument */ } else { x++; /* floating point argument */ } } error=MagickTrue; if ( color_from_image ) { /* just the control points are being given */ error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse; number_arguments=(x/2)*(2+number_colors); } else { /* control points and color values */ error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse; number_arguments=x; } if ( error ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, " MemoryAllocationFailed\n""%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of X-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of Y-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color values for this control point */ #if 0 if ( (color_from_image ) { /* get color from image */ /* HOW??? 
*/ } else #endif { /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryColorCompliance(token,AllCompliance,&color,exception); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.blue; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) sparse_arguments[x++] = QuantumScale*color.black; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) sparse_arguments[x++] = QuantumScale*color.alpha; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } } if ( number_arguments != x && !error ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, " InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error"); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( (Image *) NULL); } if ( error ) return( (Image *) NULL); /* Call the Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments, exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, const char **argv,Image **image,ExceptionInfo *exception) { CompositeOperator compose; const char *format, *option; 
double attenuate; DrawInfo *draw_info; GeometryInfo geometry_info; ImageInfo *mogrify_info; MagickStatusType status; PixelInfo fill; MagickStatusType flags; PixelInterpolateMethod interpolate_method; QuantizeInfo *quantize_info; RectangleInfo geometry, region_geometry; register ssize_t i; /* Initialize method variables. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (argc < 0) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL); quantize_info=AcquireQuantizeInfo(mogrify_info); SetGeometryInfo(&geometry_info); GetPixelInfo(*image,&fill); fill=(*image)->background_color; attenuate=1.0; compose=(*image)->compose; interpolate_method=UndefinedInterpolatePixel; format=GetImageOption(mogrify_info,"format"); SetGeometry(*image,&region_geometry); /* Transmogrify the image. */ for (i=0; i < (ssize_t) argc; i++) { Image *mogrify_image; ssize_t count; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option), 0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); mogrify_image=(Image *) NULL; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { /* Adaptive blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveBlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* Adaptive resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=AdaptiveResizeImage(*image,geometry.width, geometry.height,exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { /* Adaptive sharpen image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveSharpenImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("affine",option+1) == 0) { /* Affine matrix. */ if (*option == '+') { GetAffineMatrix(&draw_info->affine); break; } (void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception); break; } if (LocaleCompare("alpha",option+1) == 0) { AlphaChannelOption alpha_type; (void) SyncImageSettings(mogrify_info,*image,exception); alpha_type=(AlphaChannelOption) ParseCommandOption( MagickAlphaChannelOptions,MagickFalse,argv[i+1]); (void) SetImageAlphaChannel(*image,alpha_type,exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char *text, geometry_str[MagickPathExtent]; /* Annotate image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; text=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (text == (char *) NULL) break; (void) CloneString(&draw_info->text,text); text=DestroyString(text); (void) FormatLocaleString(geometry_str,MagickPathExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&draw_info->geometry,geometry_str); draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.ry=(-sin(DegreesToRadians( fmod(geometry_info.sigma,360.0)))); draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(*image,draw_info,exception); break; } if (LocaleCompare("antialias",option+1) == 0) { draw_info->stroke_antialias=(*option == '-') ? MagickTrue : MagickFalse; draw_info->text_antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') { attenuate=1.0; break; } attenuate=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("auto-gamma",option+1) == 0) { /* Auto Adjust Gamma of image based on its mean */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) AutoGammaImage(*image,exception); break; } if (LocaleCompare("auto-level",option+1) == 0) { /* Perfectly Normalize (max/min stretch) the image */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) AutoLevelImage(*image,exception); break; } if (LocaleCompare("auto-orient",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=AutoOrientImage(*image,(*image)->orientation, exception); break; } if (LocaleCompare("auto-threshold",option+1) == 0) { AutoThresholdMethod method; (void) SyncImageSettings(mogrify_info,*image,exception); method=(AutoThresholdMethod) ParseCommandOption( MagickAutoThresholdOptions,MagickFalse,argv[i+1]); (void) AutoThresholdImage(*image,method,exception); break; } break; } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { /* Black threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) BlackThresholdImage(*image,argv[i+1],exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { /* Blue shift image. */ (void) SyncImageSettings(mogrify_info,*image,exception); geometry_info.rho=1.5; if (*option == '-') flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("blur",option+1) == 0) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; mogrify_image=BlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("border",option+1) == 0) { /* Surround image with a border of solid color. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=BorderImage(*image,&geometry,compose,exception); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &draw_info->border_color,exception); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->border_color,exception); break; } if (LocaleCompare("box",option+1) == 0) { (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->undercolor,exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; /* Brightness / contrast image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImage(*image,brightness,contrast, exception); break; } break; } case 'c': { if (LocaleCompare("canny",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10; if ((flags & PsiValue) == 0) geometry_info.psi=0.30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } mogrify_image=CannyEdgeImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Color correct with a color decision list. */ (void) SyncImageSettings(mogrify_info,*image,exception); color_correction_collection=FileToString(argv[i+1],~0UL,exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(*image,color_correction_collection, exception); break; } if (LocaleCompare("channel",option+1) == 0) { ChannelType channel; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetPixelChannelMask(*image,DefaultChannels); break; } channel=(ChannelType) ParseChannelOption(argv[i+1]); (void) SetPixelChannelMask(*image,channel); break; } if (LocaleCompare("charcoal",option+1) == 0) { /* Charcoal image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; mogrify_image=CharcoalImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("chop",option+1) == 0) { /* Chop the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ChopImage(*image,&geometry,exception); break; } if (LocaleCompare("clahe",option+1) == 0) { /* Contrast limited adaptive histogram equalization. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); flags=ParseGeometry(argv[i+1],&geometry_info); (void) CLAHEImage(*image,geometry.width,geometry.height, (size_t) geometry.x,geometry_info.psi,exception); break; } if (LocaleCompare("clip",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } (void) ClipImage(*image,exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { Image *clip_mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ clip_mask=GetImageCache(mogrify_info,argv[i+1],exception); if (clip_mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("colorize",option+1) == 0) { /* Colorize the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=ColorizeImage(*image,argv[i+1],&fill,exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image,exception); kernel=AcquireKernelInfo(argv[i+1],exception); if (kernel == (KernelInfo *) NULL) break; /* FUTURE: check on size of the matrix */ mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); quantize_info->number_colors=StringToUnsignedLong(argv[i+1]); if (quantize_info->number_colors == 0) break; if (((*image)->storage_class == DirectClass) || (*image)->colors > quantize_info->number_colors) (void) QuantizeImage(quantize_info,*image,exception); else (void) CompressImageColormap(*image,exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { ColorspaceType colorspace; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) TransformImageColorspace(*image,sRGBColorspace, exception); break; } colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) TransformImageColorspace(*image,colorspace,exception); break; } if (LocaleCompare("compose",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("connected-components",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=ConnectedComponentsImage(*image,(size_t) StringToInteger(argv[i+1]),(CCObjectInfo **) NULL,exception); break; } if (LocaleCompare("contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; /* Contrast stretch image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } white_point=(double) (*image)->columns*(*image)->rows- white_point; (void) ContrastStretchImage(*image,black_point,white_point, exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; size_t extent; (void) SyncImageSettings(mogrify_info,*image,exception); kernel_info=AcquireKernelInfo(argv[i+1],exception); if (kernel_info == (KernelInfo *) NULL) break; extent=kernel_info->width*kernel_info->height; gamma=0.0; for (j=0; j < (ssize_t) extent; j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) extent; j++) kernel_info->values[j]*=gamma; mogrify_image=MorphologyImage(*image,CorrelateMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* Crop a image to a smaller size */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=CropImageToTiles(*image,argv[i+1],exception); break; } if (LocaleCompare("cycle",option+1) == 0) { /* Cycle an image colormap. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]), exception); break; } break; } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { StringInfo *passkey; /* Decipher pixels. */ (void) SyncImageSettings(mogrify_info,*image,exception); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyDecipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. */ (void) CloneString(&draw_info->density,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH,exception); break; } (void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]), exception); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; /* Straighten the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') threshold=40.0*QuantumRange/100.0; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=DeskewImage(*image,threshold,exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { /* Reduce the speckles within an image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=DespeckleImage(*image,exception); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&draw_info->server_name,argv[i+1]); break; } if (LocaleCompare("distort",option+1) == 0) { char *args, token[MagickPathExtent]; const char *p; DistortMethod method; double *arguments; register ssize_t x; size_t number_arguments; /* Distort image. */ (void) SyncImageSettings(mogrify_info,*image,exception); method=(DistortMethod) ParseCommandOption(MagickDistortOptions, MagickFalse,argv[i+1]); if (method == ResizeDistortion) { double resize_args[2]; /* Special Case - Argument is actually a resize geometry! 
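(For example '-distort Resize 800x600', an illustrative geometry, supplies a target size instead of the usual list of control points.)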
Convert that to an appropriate distortion argument array. */ (void) ParseRegionGeometry(*image,argv[i+2],&geometry, exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; mogrify_image=DistortImage(*image,method,(size_t)2, resize_args,MagickTrue,exception); break; } args=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); mogrify_image=DistortImage(*image,method,number_arguments,arguments, (*option == '+') ? MagickTrue : MagickFalse,exception); arguments=(double *) RelinquishMagickMemory(arguments); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither_method=NoDitherMethod; break; } quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("draw",option+1) == 0) { /* Draw image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) CloneString(&draw_info->primitive,argv[i+1]); (void) DrawImage(*image,draw_info,exception); break; } break; } case 'e': { if (LocaleCompare("edge",option+1) == 0) { /* Enhance edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=EdgeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("emboss",option+1) == 0) { /* Emboss image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EmbossImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("encipher",option+1) == 0) { StringInfo *passkey; /* Encipher pixels. */ (void) SyncImageSettings(mogrify_info,*image,exception); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&draw_info->encoding,argv[i+1]); break; } if (LocaleCompare("enhance",option+1) == 0) { /* Enhance image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=EnhanceImage(*image,exception); break; } if (LocaleCompare("equalize",option+1) == 0) { /* Equalize image. 
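EqualizeImage() spreads the histogram over the full quantum range; the option takes no extra argument (cf. the usage text below).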
*/ (void) SyncImageSettings(mogrify_info,*image,exception); (void) EqualizeImage(*image,exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*image,exception); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+ 1.0); (void) EvaluateImage(*image,op,constant,exception); break; } if (LocaleCompare("extent",option+1) == 0) { /* Set the image extent. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (geometry.width == 0) geometry.width=(*image)->columns; if (geometry.height == 0) geometry.height=(*image)->rows; mogrify_image=ExtentImage(*image,&geometry,exception); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') { if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); break; } (void) CloneString(&draw_info->family,argv[i+1]); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:features"); break; } (void) SetImageArtifact(*image,"identify:features",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { ExceptionInfo *sans; PixelInfo color; GetPixelInfo(*image,&fill); if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance,&fill, exception); draw_info->fill=fill; if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); else draw_info->fill=fill=color; break; } if (LocaleCompare("flip",option+1) == 0) { /* Flip image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=FlipImage(*image,exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { PixelInfo target; /* Floodfill image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) QueryColorCompliance(argv[i+2],AllCompliance,&target, exception); (void) FloodfillPaintImage(*image,draw_info,&target,geometry.x, geometry.y,*option == '-' ? MagickFalse : MagickTrue,exception); break; } if (LocaleCompare("flop",option+1) == 0) { /* Flop image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=FlopImage(*image,exception); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); break; } (void) CloneString(&draw_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { format=argv[i+1]; break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; /* Surround image with an ornamental border. 
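The page geometry gives the frame thickness, its x/y offsets become the outer and inner bevels, and the framed canvas grows by twice the thickness in each direction, e.g. '-frame 10x10+3+3' (illustrative values).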
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=(*image)->columns+2*frame_info.width; frame_info.height=(*image)->rows+2*frame_info.height; mogrify_image=FrameImage(*image,&frame_info,compose,exception); break; } if (LocaleCompare("function",option+1) == 0) { char *arguments, token[MagickPathExtent]; const char *p; double *parameters; MagickFunction function; register ssize_t x; size_t number_parameters; /* Function Modify Image Values */ (void) SyncImageSettings(mogrify_info,*image,exception); function=(MagickFunction) ParseCommandOption(MagickFunctionOptions, MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (arguments == (char *) NULL) break; p=(char *) arguments; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_parameters=(size_t) x; parameters=(double *) AcquireQuantumMemory(number_parameters, sizeof(*parameters)); if (parameters == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(parameters,0,number_parameters* sizeof(*parameters)); p=(char *) arguments; for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); parameters[x]=StringToDouble(token,(char **) NULL); } arguments=DestroyString(arguments); (void) FunctionImage(*image,function,number_parameters,parameters, exception); parameters=(double *) RelinquishMagickMemory(parameters); break; } break; } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { /* Gamma image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') (*image)->gamma=StringToDouble(argv[i+1],(char **) NULL); else (void) GammaImage(*image,StringToDouble(argv[i+1],(char **) NULL), exception); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=GaussianBlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset, Resize last image. 
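If the argument carries an x/y offset it is merely recorded as the image's geometry string; otherwise the image is resized to the requested width and height.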
*/ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { if ((*image)->geometry != (char *) NULL) (*image)->geometry=DestroyString((*image)->geometry); break; } flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&(*image)->geometry,argv[i+1]); else mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,exception); break; } if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { draw_info->gravity=UndefinedGravity; break; } draw_info->gravity=(GravityType) ParseCommandOption( MagickGravityOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("grayscale",option+1) == 0) { PixelIntensityMethod method; (void) SyncImageSettings(mogrify_info,*image,exception); method=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,argv[i+1]); (void) GrayscaleImage(*image,method,exception); break; } break; } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]); break; } if (LocaleCompare("hough-lines",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } break; } case 'i': { if (LocaleCompare("identify",option+1) == 0) { char *text; (void) SyncImageSettings(mogrify_info,*image,exception); if (format == (char *) NULL) { (void) IdentifyImage(*image,stdout,mogrify_info->verbose, exception); break; } text=InterpretImageProperties(mogrify_info,*image,format, exception); if (text == (char *) NULL) break; (void) fputs(text,stdout); text=DestroyString(text); break; } if (LocaleCompare("implode",option+1) == 0) { /* Implode image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=ImplodeImage(*image,geometry_info.rho, interpolate_method,exception); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interline_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolate",option+1) == 0) { interpolate_method=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interword_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* Interpolative resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=InterpolativeResizeImage(*image,geometry.width, geometry.height,interpolate_method,exception); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->kerning=geometry_info.rho; break; } if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
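When only a radius is given, sigma defaults to radius-0.5 before KuwaharaImage() is invoked.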
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; mogrify_image=KuwaharaImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } break; } case 'l': { if (LocaleCompare("lat",option+1) == 0) { /* Local adaptive threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=AdaptiveThresholdImage(*image,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,(double) geometry_info.xi,exception); break; } if (LocaleCompare("level",option+1) == 0) { double black_point, gamma, white_point; /* Parse levels. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) (QuantumRange/100.0); white_point*=(double) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((*option == '+') || ((flags & AspectValue) != 0)) (void) LevelizeImage(*image,black_point,white_point,gamma, exception); else (void) LevelImage(*image,black_point,white_point,gamma, exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MagickPathExtent]; const char *p; PixelInfo black_point, white_point; p=(const char *) argv[i+1]; GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &black_point,exception); else (void) QueryColorCompliance("#000000",AllCompliance, &black_point,exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &white_point,exception); else (void) QueryColorCompliance("#ffffff",AllCompliance, &white_point,exception); } (void) LevelImageColors(*image,&black_point,&white_point, *option == '+' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(double) (*image)->columns*(*image)->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) (*image)->columns*(*image)->rows- black_point; (void) LinearStretchImage(*image,black_point,white_point,exception); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* Liquid rescale image. 
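Seam-carving resize: the geometry's x and y offsets (defaulting to 1 and 0) are forwarded as the delta-x and rigidity arguments of LiquidRescaleImage().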
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; mogrify_image=LiquidRescaleImage(*image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; mogrify_image=LocalContrastImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("lowlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { /* Double image size. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=MagnifyImage(*image,exception); break; } if (LocaleCompare("map",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image,exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,mask,exception); mask=DestroyImage(mask); break; } if (LocaleCompare("matte",option+1) == 0) { (void) SetImageAlphaChannel(*image,(*option == '-') ? SetAlphaChannel : DeactivateAlphaChannel,exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("median",option+1) == 0) { /* Median filter image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,MedianStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* Mode image. 
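Implemented with StatisticImage() using ModeStatistic over a rho x sigma neighborhood; sigma defaults to rho when omitted.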
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,ModeStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ModulateImage(*image,argv[i+1],exception); break; } if (LocaleCompare("moments",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:moments"); break; } (void) SetImageArtifact(*image,"identify:moments",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { if (*option == '+') { (void) SetImageProgressMonitor(*image, (MagickProgressMonitor) NULL,(void *) NULL); break; } (void) SetImageProgressMonitor(*image,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) SetImageType(*image,BilevelType,exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; const char *p; KernelInfo *kernel; MorphologyMethod method; ssize_t iterations; /* Morphological Image Operation */ (void) SyncImageSettings(mogrify_info,*image,exception); p=argv[i+1]; GetNextToken(p,&p,MagickPathExtent,token); method=(MorphologyMethod) ParseCommandOption( MagickMorphologyOptions,MagickFalse,token); iterations=1L; GetNextToken(p,&p,MagickPathExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MagickPathExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(argv[i+2],exception); if (kernel == (KernelInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnabletoParseKernel","morphology"); status=MagickFalse; break; } mogrify_image=MorphologyImage(*image,method,iterations,kernel, exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { /* Motion blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=MotionBlurImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } break; } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) NegateImage(*image,*option == '+' ? 
MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("noise",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '-') { flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,NonpeakStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); } else { NoiseType noise; noise=(NoiseType) ParseCommandOption(MagickNoiseOptions, MagickFalse,argv[i+1]); mogrify_image=AddNoiseImage(*image,noise,attenuate,exception); } break; } if (LocaleCompare("normalize",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) NormalizeImage(*image,exception); break; } break; } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { PixelInfo target; (void) SyncImageSettings(mogrify_info,*image,exception); (void) QueryColorCompliance(argv[i+1],AllCompliance,&target, exception); (void) OpaquePaintImage(*image,&target,&fill,*option == '-' ? MagickFalse : MagickTrue,exception); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) OrderedDitherImage(*image,argv[i+1],exception); break; } break; } case 'p': { if (LocaleCompare("paint",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=OilPaintImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { /* Perceptible image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) PerceptibleImage(*image,StringToDouble(argv[i+1], (char **) NULL),exception); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') (void) ParseGeometry("12",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("polaroid",option+1) == 0) { const char *caption; double angle; RandomInfo *random_info; /* Simulate a Polaroid picture. */ (void) SyncImageSettings(mogrify_info,*image,exception); random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); if (*option == '-') { SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); angle=geometry_info.rho; } caption=GetImageProperty(*image,"caption",exception); mogrify_image=PolaroidImage(*image,draw_info,caption,angle, interpolate_method,exception); break; } if (LocaleCompare("posterize",option+1) == 0) { /* Posterize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]), quantize_info->dither_method,exception); break; } if (LocaleCompare("preview",option+1) == 0) { PreviewType preview_type; /* Preview image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') preview_type=UndefinedPreview; else preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); mogrify_image=PreviewImage(*image,preview_type,exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a profile from the image. */ (void) ProfileImage(*image,argv[i+1],(const unsigned char *) NULL,0,exception); break; } /* Associate a profile with the image. 
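The argument is first read as an image and every profile it contains is copied onto the target (any existing IPTC profile is passed along as a decoding hint); if it cannot be read as an image, the raw file contents are attached under the profile name implied by the filename.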
*/ profile_info=CloneImageInfo(mogrify_info); profile=GetImageProfile(*image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,argv[i+1],exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *file_data; profile_info=CloneImageInfo(mogrify_info); (void) CopyMagickString(profile_info->filename,argv[i+1], MagickPathExtent); file_data=FileToStringInfo(profile_info->filename,~0UL, exception); if (file_data != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,exception); (void) ProfileImage(*image,profile_info->magick, GetStringInfoDatum(file_data), GetStringInfoLength(file_data),exception); file_data=DestroyStringInfo(file_data); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(*image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),exception); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } break; } case 'q': { if (LocaleCompare("quantize",option+1) == 0) { if (*option == '+') { quantize_info->colorspace=UndefinedColorspace; break; } quantize_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); break; } break; } case 'r': { if (LocaleCompare("rotational-blur",option+1) == 0) { /* Rotational blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=RotationalBlurImage(*image,geometry_info.rho, exception); break; } if (LocaleCompare("raise",option+1) == 0) { /* Surround image with a raise of solid color. */ flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { /* Random threshold image. */ double min_threshold, max_threshold; (void) SyncImageSettings(mogrify_info,*image,exception); min_threshold=0.0; max_threshold=(double) QuantumRange; flags=ParseGeometry(argv[i+1],&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(argv[i+1],'%') != (char *) NULL) { max_threshold*=(double) (0.01*QuantumRange); min_threshold*=(double) (0.01*QuantumRange); } (void) RandomThresholdImage(*image,min_threshold,max_threshold, exception); break; } if (LocaleCompare("range-threshold",option+1) == 0) { /* Range threshold image. 
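Up to four values (low-black, low-white, high-white, high-black); each missing value defaults to the previous one, and a '%' suffix scales them all to the quantum range.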
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=geometry_info.sigma; if ((flags & PsiValue) == 0) geometry_info.psi=geometry_info.xi; if (strchr(argv[i+1],'%') != (char *) NULL) { geometry_info.rho*=(double) (0.01*QuantumRange); geometry_info.sigma*=(double) (0.01*QuantumRange); geometry_info.xi*=(double) (0.01*QuantumRange); geometry_info.psi*=(double) (0.01*QuantumRange); } (void) RangeThresholdImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("read-mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,ReadPixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,ReadPixelMask,mask,exception); mask=DestroyImage(mask); break; } if (LocaleCompare("region",option+1) == 0) { /* Apply read mask as defined by a region geometry. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageRegionMask(*image,WritePixelMask, (const RectangleInfo *) NULL,exception); break; } (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); (void) SetImageRegionMask(*image,WritePixelMask,&geometry, exception); break; } if (LocaleCompare("render",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); draw_info->render=(*option == '+') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("remap",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image,exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') { (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); break; } (void) ResetImagePage(*image,argv[i+1]); break; } if (LocaleCompare("resample",option+1) == 0) { /* Resample image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ResampleImage(*image,geometry_info.rho, geometry_info.sigma,(*image)->filter,exception); break; } if (LocaleCompare("resize",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,exception); break; } if (LocaleCompare("roll",option+1) == 0) { /* Roll image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) (*image)->columns/100.0; geometry.y*=(double) (*image)->rows/100.0; } mogrify_image=RollImage(*image,geometry.x,geometry.y,exception); break; } if (LocaleCompare("rotate",option+1) == 0) { char *rotation; /* Check for conditional image rotation. 
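A '>' in the argument rotates only when width exceeds height, '<' only when width is less than height (e.g. '-rotate 90>', illustrative); the markers are stripped before the angle is parsed.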
*/ (void) SyncImageSettings(mogrify_info,*image,exception); if (strchr(argv[i+1],'>') != (char *) NULL) if ((*image)->columns <= (*image)->rows) break; if (strchr(argv[i+1],'<') != (char *) NULL) if ((*image)->columns >= (*image)->rows) break; /* Rotate image. */ rotation=ConstantString(argv[i+1]); (void) SubstituteString(&rotation,">",""); (void) SubstituteString(&rotation,"<",""); (void) ParseGeometry(rotation,&geometry_info); rotation=DestroyString(rotation); mogrify_image=RotateImage(*image,geometry_info.rho,exception); break; } break; } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* Sample image with pixel replication. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SampleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ScaleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { /* Selectively blur pixels within a contrast threshold. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=SelectiveBlurImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* Break channels into separate images. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=SeparateImages(*image,exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { double threshold; /* Sepia-tone image. */ (void) SyncImageSettings(mogrify_info,*image,exception); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=SepiaToneImage(*image,threshold,exception); break; } if (LocaleCompare("segment",option+1) == 0) { /* Segment image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(*image,(*image)->colorspace, mogrify_info->verbose,geometry_info.rho,geometry_info.sigma, exception); break; } if (LocaleCompare("set",option+1) == 0) { char *value; /* Set image option. */ if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) DeleteImageOption(mogrify_info,argv[i+1]+7); (void) DeleteImageArtifact(*image,argv[i+1]+7); } else (void) DeleteImageProperty(*image,argv[i+1]); break; } value=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (value == (char *) NULL) break; if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value, exception); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) SetImageOption(image_info,argv[i+1]+7,value); (void) SetImageOption(mogrify_info,argv[i+1]+7,value); (void) SetImageArtifact(*image,argv[i+1]+7,value); } else (void) SetImageProperty(*image,argv[i+1],value,exception); value=DestroyString(value); break; } if (LocaleCompare("shade",option+1) == 0) { /* Shade image. 
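Rho and sigma are the light azimuth and elevation (elevation defaults to 1.0); '-shade' passes MagickTrue for the gray flag so shading is applied to intensity, while '+shade' passes MagickFalse.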
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("shadow",option+1) == 0) { /* Shadow image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; mogrify_image=ShadowImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5), (ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { /* Sharpen image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; mogrify_image=SharpenImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("shave",option+1) == 0) { /* Shave the image edges. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ShaveImage(*image,&geometry,exception); break; } if (LocaleCompare("shear",option+1) == 0) { /* Shear image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ShearImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { /* Sigmoidal non-linearity contrast control. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma, exception); break; } if (LocaleCompare("sketch",option+1) == 0) { /* Sketch image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SketchImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("solarize",option+1) == 0) { double threshold; (void) SyncImageSettings(mogrify_info,*image,exception); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) SolarizeImage(*image,threshold,exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { SparseColorMethod method; char *arguments; /* Sparse Color Interpolated Gradient */ (void) SyncImageSettings(mogrify_info,*image,exception); method=(SparseColorMethod) ParseCommandOption( MagickSparseColorOptions,MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (arguments == (char *) NULL) break; mogrify_image=SparseColorOption(*image,method,arguments, option[0] == '+' ? MagickTrue : MagickFalse,exception); arguments=DestroyString(arguments); break; } if (LocaleCompare("splice",option+1) == 0) { /* Splice a solid color into the image. 
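The gravity-adjusted geometry gives the size and position of the band of background color that is spliced into the image.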
*/ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SpliceImage(*image,&geometry,exception); break; } if (LocaleCompare("spread",option+1) == 0) { /* Spread an image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SpreadImage(*image,interpolate_method, geometry_info.rho,exception); break; } if (LocaleCompare("statistic",option+1) == 0) { StatisticType type; (void) SyncImageSettings(mogrify_info,*image,exception); type=(StatisticType) ParseCommandOption(MagickStatisticOptions, MagickFalse,argv[i+1]); (void) ParseGeometry(argv[i+2],&geometry_info); mogrify_image=StatisticImage(*image,type,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("stretch",option+1) == 0) { if (*option == '+') { draw_info->stretch=UndefinedStretch; break; } draw_info->stretch=(StretchType) ParseCommandOption( MagickStretchOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("strip",option+1) == 0) { /* Strip image of profiles and comments. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) StripImage(*image,exception); break; } if (LocaleCompare("stroke",option+1) == 0) { ExceptionInfo *sans; PixelInfo color; if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance, &draw_info->stroke,exception); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage( draw_info->stroke_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1], exception); else draw_info->stroke=color; break; } if (LocaleCompare("strokewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { draw_info->style=UndefinedStyle; break; } draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("swirl",option+1) == 0) { /* Swirl image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SwirlImage(*image,geometry_info.rho, interpolate_method,exception); break; } break; } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') threshold=(double) QuantumRange/2; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) BilevelImage(*image,threshold,exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { /* Thumbnail image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') { if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("tint",option+1) == 0) { /* Tint the image. 
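TintImage() receives argv[i+1] as the blend amount and the current fill color as the tint.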
*/ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TintImage(*image,argv[i+1],&fill,exception); break; } if (LocaleCompare("transform",option+1) == 0) { /* Affine transform image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=AffineTransformImage(*image,&draw_info->affine, exception); break; } if (LocaleCompare("transparent",option+1) == 0) { PixelInfo target; (void) SyncImageSettings(mogrify_info,*image,exception); (void) QueryColorCompliance(argv[i+1],AllCompliance,&target, exception); (void) TransparentPaintImage(*image,&target,(Quantum) TransparentAlpha,*option == '-' ? MagickFalse : MagickTrue, exception); break; } if (LocaleCompare("transpose",option+1) == 0) { /* Transpose image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TransposeImage(*image,exception); break; } if (LocaleCompare("transverse",option+1) == 0) { /* Transverse image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TransverseImage(*image,exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("trim",option+1) == 0) { /* Trim image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TrimImage(*image,exception); break; } if (LocaleCompare("type",option+1) == 0) { ImageType type; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') type=UndefinedType; else type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, argv[i+1]); (*image)->type=UndefinedType; (void) SetImageType(*image,type,exception); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->undercolor,exception); break; } if (LocaleCompare("unique",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:unique-colors"); break; } (void) SetImageArtifact(*image,"identify:unique-colors","true"); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { /* Unique image colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=UniqueImageColors(*image,exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { /* Unsharp mask image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; mogrify_image=UnsharpMaskImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi, exception); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { (void) SetImageArtifact(*image,option+1, *option == '+' ? "false" : "true"); break; } if (LocaleCompare("vignette",option+1) == 0) { /* Vignette image. 
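Rho and sigma control the blur while xi and psi inset the ellipse from the edges (defaulting to 10% of the width and height; a '%' flag rescales them relative to the image size).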
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*(*image)->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*(*image)->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) (*image)->columns/100.0; geometry_info.psi*=(double) (*image)->rows/100.0; } mogrify_image=VignetteImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5), (ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { (void) SetImageVirtualPixelMethod(*image, UndefinedVirtualPixelMethod,exception); break; } (void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1]),exception); break; } break; } case 'w': { if (LocaleCompare("wave",option+1) == 0) { /* Wave image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=WaveImage(*image,geometry_info.rho, geometry_info.sigma,interpolate_method,exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { /* Wavelet denoise image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse, argv[i+1]); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(argv[i+1]); draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-threshold",option+1) == 0) { /* White threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) WhiteThresholdImage(*image,argv[i+1],exception); break; } if (LocaleCompare("write-mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,mask,exception); mask=DestroyImage(mask); break; } break; } default: break; } /* Replace current image with any image that was generated */ if (mogrify_image != (Image *) NULL) ReplaceImageInListReturnLast(image,mogrify_image); i+=count; } /* Free resources. */ quantize_info=DestroyQuantizeInfo(quantize_info); draw_info=DestroyDrawInfo(draw_info); mogrify_info=DestroyImageInfo(mogrify_info); status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageCommand() transforms an image or a sequence of images. These % transforms include image scaling, image rotation, color reduction, and % others. 
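% A typical invocation is 'mogrify -resize 50% photo.png' (illustrative filename), which scales the image to half size.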
The transmogrified image overwrites the original image. % % The format of the MogrifyImageCommand method is: % % MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc, % const char **argv,char **metadata,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o argc: the number of elements in the argument vector. % % o argv: A text array containing the command line arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType MogrifyUsage(void) { static const char channel_operators[] = " -channel-fx expression\n" " exchange, extract, or transfer one or more image channels\n" " -separate separate an image channel into a grayscale image", miscellaneous[] = " -debug events display copious debugging information\n" " -distribute-cache port\n" " distributed pixel cache spanning one or more servers\n" " -help print program options\n" " -list type print a list of supported option arguments\n" " -log format format of debugging information\n" " -version print version information", operators[] = " -adaptive-blur geometry\n" " adaptively blur pixels; decrease effect near edges\n" " -adaptive-resize geometry\n" " adaptively resize image using 'mesh' interpolation\n" " -adaptive-sharpen geometry\n" " adaptively sharpen pixels; increase effect near edges\n" " -alpha option on, activate, off, deactivate, set, opaque, copy\n" " transparent, extract, background, or shape\n" " -annotate geometry text\n" " annotate the image with text\n" " -auto-gamma automagically adjust gamma level of image\n" " -auto-level automagically adjust color levels of image\n" " -auto-orient automagically orient (rotate) image\n" " -auto-threshold method\n" " automatically perform image thresholding\n" " -bench iterations measure performance\n" " -black-threshold value\n" " force all pixels below the threshold into black\n" " -blue-shift simulate a scene at nighttime in the moonlight\n" " -blur geometry reduce image noise and reduce detail levels\n" " -border geometry surround image with a border of color\n" " -bordercolor color border color\n" " -brightness-contrast geometry\n" " improve brightness / contrast of the image\n" " -canny geometry detect edges in the image\n" " -cdl filename color correct with a color decision list\n" " -channel mask set the image channel mask\n" " -charcoal geometry simulate a charcoal drawing\n" " -chop geometry remove pixels from the image interior\n" " -clahe geometry contrast limited adaptive histogram equalization\n" " -clamp keep pixel values in range (0-QuantumRange)\n" " -clip clip along the first path from the 8BIM profile\n" " -clip-mask filename associate a clip mask with the image\n" " -clip-path id clip along a named path from the 8BIM profile\n" " -colorize value colorize the image with the fill color\n" " -color-matrix matrix apply color correction to the image\n" " -connected-components connectivity\n" " connected-components uniquely labeled\n" " -contrast enhance or reduce the image contrast\n" " -contrast-stretch geometry\n" " improve contrast by 'stretching' the intensity range\n" " -convolve coefficients\n" " apply a convolution kernel to the image\n" " -cycle amount cycle the image colormap\n" " -decipher filename convert cipher pixels to plain pixels\n" " -deskew threshold straighten an image\n" " -despeckle reduce the speckles within an image\n" " -distort method args\n" " distort images according to given method ad 
args\n" " -draw string annotate the image with a graphic primitive\n" " -edge radius apply a filter to detect edges in the image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -emboss radius emboss an image\n" " -enhance apply a digital filter to enhance a noisy image\n" " -equalize perform histogram equalization to an image\n" " -evaluate operator value\n" " evaluate an arithmetic, relational, or logical expression\n" " -extent geometry set the image size\n" " -extract geometry extract area from image\n" " -fft implements the discrete Fourier transform (DFT)\n" " -flip flip image vertically\n" " -floodfill geometry color\n" " floodfill the image with color\n" " -flop flop image horizontally\n" " -frame geometry surround image with an ornamental border\n" " -function name parameters\n" " apply function over image values\n" " -gamma value level of gamma correction\n" " -gaussian-blur geometry\n" " reduce image noise and reduce detail levels\n" " -geometry geometry preferred size or location of the image\n" " -grayscale method convert image to grayscale\n" " -hough-lines geometry\n" " identify lines in the image\n" " -identify identify the format and characteristics of the image\n" " -ift implements the inverse discrete Fourier transform (DFT)\n" " -implode amount implode image pixels about the center\n" " -interpolative-resize geometry\n" " resize image using interpolation\n" " -kuwahara geometry edge preserving noise reduction filter\n" " -lat geometry local adaptive thresholding\n" " -level value adjust the level of image contrast\n" " -level-colors color,color\n" " level image with the given colors\n" " -linear-stretch geometry\n" " improve contrast by 'stretching with saturation'\n" " -liquid-rescale geometry\n" " rescale image with seam-carving\n" " -local-contrast geometry\n" " enhance local contrast\n" " -magnify double the size of the image with pixel art scaling\n" " -mean-shift geometry delineate arbitrarily shaped clusters in the image\n" " -median geometry apply a median filter to the image\n" " -mode geometry make each pixel the 'predominant color' of the\n" " neighborhood\n" " -modulate value vary the brightness, saturation, and hue\n" " -monochrome transform image to black and white\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -motion-blur geometry\n" " simulate motion blur\n" " -negate replace every pixel with its complementary color \n" " -noise geometry add or reduce noise in an image\n" " -normalize transform image to span the full range of colors\n" " -opaque color change this color to the fill color\n" " -ordered-dither NxN\n" " add a noise pattern to the image with specific\n" " amplitudes\n" " -paint radius simulate an oil painting\n" " -perceptible epsilon\n" " pixel value less than |epsilon| become epsilon or\n" " -epsilon\n" " -polaroid angle simulate a Polaroid picture\n" " -posterize levels reduce the image to a limited number of color levels\n" " -profile filename add, delete, or apply an image profile\n" " -quantize colorspace reduce colors in this colorspace\n" " -raise value lighten/darken image edges to create a 3-D effect\n" " -random-threshold low,high\n" " random threshold the image\n" " -range-threshold values\n" " perform either hard or soft thresholding within some range of values in an image\n" " -region geometry apply options to a portion of the image\n" " -render render vector graphics\n" " -repage geometry size and location of an image canvas\n" " -resample geometry change the resolution of an 
image\n" " -resize geometry resize the image\n" " -roll geometry roll an image vertically or horizontally\n" " -rotate degrees apply Paeth rotation to the image\n" " -rotational-blur angle\n" " rotational blur the image\n" " -sample geometry scale image with pixel sampling\n" " -scale geometry scale the image\n" " -segment values segment an image\n" " -selective-blur geometry\n" " selectively blur pixels within a contrast threshold\n" " -sepia-tone threshold\n" " simulate a sepia-toned photo\n" " -set property value set an image property\n" " -shade degrees shade the image using a distant light source\n" " -shadow geometry simulate an image shadow\n" " -sharpen geometry sharpen the image\n" " -shave geometry shave pixels from the image edges\n" " -shear geometry slide one edge of the image along the X or Y axis\n" " -sigmoidal-contrast geometry\n" " increase the contrast without saturating highlights or\n" " shadows\n" " -sketch geometry simulate a pencil sketch\n" " -solarize threshold negate all pixels above the threshold level\n" " -sparse-color method args\n" " fill in a image based on a few color points\n" " -splice geometry splice the background color into the image\n" " -spread radius displace image pixels by a random amount\n" " -statistic type radius\n" " replace each pixel with corresponding statistic from the neighborhood\n" " -strip strip image of all profiles and comments\n" " -swirl degrees swirl image pixels about the center\n" " -threshold value threshold the image\n" " -thumbnail geometry create a thumbnail of the image\n" " -tile filename tile image when filling a graphic primitive\n" " -tint value tint the image with the fill color\n" " -transform affine transform image\n" " -transparent color make this color transparent within the image\n" " -transpose flip image vertically and rotate 90 degrees\n" " -transverse flop image horizontally and rotate 270 degrees\n" " -trim trim image edges\n" " -type type image type\n" " -unique-colors discard all but one of any pixel color\n" " -unsharp geometry sharpen the image\n" " -vignette geometry soften the edges of the image in vignette style\n" " -wave geometry alter an image along a sine wave\n" " -wavelet-denoise threshold\n" " removes noise from the image using a wavelet transform\n" " -white-threshold value\n" " force all pixels above the threshold into white", sequence_operators[] = " -affinity filename transform image colors to match this set of colors\n" " -append append an image sequence\n" " -clut apply a color lookup table to the image\n" " -coalesce merge a sequence of images\n" " -combine combine a sequence of images\n" " -compare mathematically and visually annotate the difference between an image and its reconstruction\n" " -complex operator perform complex mathematics on an image sequence\n" " -composite composite image\n" " -copy geometry offset\n" " copy pixels from one area of an image to another\n" " -crop geometry cut out a rectangular region of the image\n" " -deconstruct break down an image sequence into constituent parts\n" " -evaluate-sequence operator\n" " evaluate an arithmetic, relational, or logical expression\n" " -flatten flatten a sequence of images\n" " -fx expression apply mathematical expression to an image channel(s)\n" " -hald-clut apply a Hald color lookup table to the image\n" " -layers method optimize, merge, or compare image layers\n" " -morph value morph an image sequence\n" " -mosaic create a mosaic from an image sequence\n" " -poly terms build a polynomial from the image sequence and the 
corresponding\n" " terms (coefficients and degree pairs).\n" " -print string interpret string and print to console\n" " -process arguments process the image with a custom image filter\n" " -smush geometry smush an image sequence together\n" " -write filename write images to this file", settings[] = " -adjoin join images into a single multi-image file\n" " -affine matrix affine transform matrix\n" " -alpha option activate, deactivate, reset, or set the alpha channel\n" " -antialias remove pixel-aliasing\n" " -authenticate password\n" " decipher image with this password\n" " -attenuate value lessen (or intensify) when adding noise to an image\n" " -background color background color\n" " -bias value add bias when convolving an image\n" " -black-point-compensation\n" " use black point compensation\n" " -blue-primary point chromaticity blue primary point\n" " -bordercolor color border color\n" " -caption string assign a caption to an image\n" " -colors value preferred number of colors in the image\n" " -colorspace type alternate image colorspace\n" " -comment string annotate image with comment\n" " -compose operator set image composite operator\n" " -compress type type of pixel compression when writing the image\n" " -define format:option=value\n" " define one or more image format options\n" " -delay value display the next image after pausing\n" " -density geometry horizontal and vertical density of the image\n" " -depth value image depth\n" " -direction type render text right-to-left or left-to-right\n" " -display server get image or font from this X server\n" " -dispose method layer disposal method\n" " -dither method apply error diffusion to image\n" " -encoding type text encoding type\n" " -endian type endianness (MSB or LSB) of the image\n" " -family name render text with this font family\n" " -features distance analyze image features (e.g. 
contrast, correlation)\n" " -fill color color to use when filling a graphic primitive\n" " -filter type use this filter when resizing an image\n" " -font name render text with this font\n" " -format \"string\" output formatted image characteristics\n" " -fuzz distance colors within this distance are considered equal\n" " -gravity type horizontal and vertical text placement\n" " -green-primary point chromaticity green primary point\n" " -intensity method method to generate an intensity value from a pixel\n" " -intent type type of rendering intent when managing the image color\n" " -interlace type type of image interlacing scheme\n" " -interline-spacing value\n" " set the space between two text lines\n" " -interpolate method pixel color interpolation method\n" " -interword-spacing value\n" " set the space between two words\n" " -kerning value set the space between two letters\n" " -label string assign a label to an image\n" " -limit type value pixel cache resource limit\n" " -loop iterations add Netscape loop extension to your GIF animation\n" " -matte store matte channel if the image has one\n" " -mattecolor color frame color\n" " -monitor monitor progress\n" " -orient type image orientation\n" " -page geometry size and location of an image canvas (setting)\n" " -path path write images to this path on disk\n" " -ping efficiently determine image attributes\n" " -pointsize value font point size\n" " -precision value maximum number of significant digits to print\n" " -preview type image preview type\n" " -quality value JPEG/MIFF/PNG compression level\n" " -quiet suppress all warning messages\n" " -read-mask filename associate a read mask with the image\n" " -red-primary point chromaticity red primary point\n" " -regard-warnings pay attention to warning messages\n" " -remap filename transform image colors to match this set of colors\n" " -respect-parentheses settings remain in effect until parenthesis boundary\n" " -sampling-factor geometry\n" " horizontal and vertical sampling factor\n" " -scene value image scene number\n" " -seed value seed a new sequence of pseudo-random numbers\n" " -size geometry width and height of image\n" " -stretch type render text with this font stretch\n" " -stroke color graphic primitive stroke color\n" " -strokewidth value graphic primitive stroke width\n" " -style type render text with this font style\n" " -synchronize synchronize image to storage device\n" " -taint declare the image as modified\n" " -texture filename name of texture to tile onto the image background\n" " -tile-offset geometry\n" " tile offset\n" " -treedepth value color tree depth\n" " -transparent-color color\n" " transparent color\n" " -undercolor color annotation bounding box color\n" " -units type the units of image resolution\n" " -verbose print detailed information about the image\n" " -view FlashPix viewing transforms\n" " -virtual-pixel method\n" " virtual pixel access method\n" " -weight type render text with this font weight\n" " -white-point point chromaticity white point\n" " -write-mask filename associate a write mask with the image", stack_operators[] = " -delete indexes delete the image from the image sequence\n" " -duplicate count,indexes\n" " duplicate an image one or more times\n" " -insert index insert last image into the image sequence\n" " -reverse reverse image sequence\n" " -swap indexes swap two images in the image sequence"; ListMagickVersion(stdout); (void) printf("Usage: %s [options ...] file [ [options ...] 
file ...]\n", GetClientName()); (void) printf("\nImage Settings:\n"); (void) puts(settings); (void) printf("\nImage Operators:\n"); (void) puts(operators); (void) printf("\nImage Channel Operators:\n"); (void) puts(channel_operators); (void) printf("\nImage Sequence Operators:\n"); (void) puts(sequence_operators); (void) printf("\nImage Stack Operators:\n"); (void) puts(stack_operators); (void) printf("\nMiscellaneous Options:\n"); (void) puts(miscellaneous); (void) printf( "\nBy default, the image format of 'file' is determined by its magic\n"); (void) printf( "number. To specify a particular image format, precede the filename\n"); (void) printf( "with an image format name and a colon (i.e. ps:image) or specify the\n"); (void) printf( "image type as the filename suffix (i.e. image.ps). Specify 'file' as\n"); (void) printf("'-' for standard input or output.\n"); return(MagickFalse); } WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info, int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception) { #define DestroyMogrify() \ { \ if (format != (char *) NULL) \ format=DestroyString(format); \ if (path != (char *) NULL) \ path=DestroyString(path); \ DestroyImageStack(); \ for (i=0; i < (ssize_t) argc; i++) \ argv[i]=DestroyString(argv[i]); \ argv=(char **) RelinquishMagickMemory(argv); \ } #define ThrowMogrifyException(asperity,tag,option) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \ option); \ DestroyMogrify(); \ return(MagickFalse); \ } #define ThrowMogrifyInvalidArgumentException(option,argument) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),OptionError, \ "InvalidArgument","'%s': %s",argument,option); \ DestroyMogrify(); \ return(MagickFalse); \ } char *format, *option, *path; Image *image; ImageStack image_stack[MaxImageStackDepth+1]; MagickBooleanType global_colormap; MagickBooleanType fire, pend, respect_parenthesis; MagickStatusType status; register ssize_t i; ssize_t j, k; wand_unreferenced(metadata); /* Set defaults. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(exception != (ExceptionInfo *) NULL); if (argc == 2) { option=argv[1]; if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); return(MagickTrue); } } if (argc < 2) return(MogrifyUsage()); format=(char *) NULL; path=(char *) NULL; global_colormap=MagickFalse; k=0; j=1; NewImageStack(); option=(char *) NULL; pend=MagickFalse; respect_parenthesis=MagickFalse; status=MagickTrue; /* Parse command line. 
*/ ReadCommandlLine(argc,&argv); status=ExpandFilenames(&argc,&argv); if (status == MagickFalse) ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed", GetExceptionMessage(errno)); for (i=1; i < (ssize_t) argc; i++) { option=argv[i]; if (LocaleCompare(option,"(") == 0) { FireImageStack(MagickFalse,MagickTrue,pend); if (k == MaxImageStackDepth) ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply", option); PushImageStack(); continue; } if (LocaleCompare(option,")") == 0) { FireImageStack(MagickFalse,MagickTrue,MagickTrue); if (k == 0) ThrowMogrifyException(OptionError,"UnableToParseExpression",option); PopImageStack(); continue; } if (IsCommandOption(option) == MagickFalse) { char backup_filename[MagickPathExtent], *filename; Image *images; struct stat properties; /* Option is a file name: begin by reading image from specified file. */ FireImageStack(MagickFalse,MagickFalse,pend); filename=argv[i]; if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1))) filename=argv[++i]; images=ReadImages(image_info,filename,exception); status&=(images != (Image *) NULL) && (exception->severity < ErrorException); if (images == (Image *) NULL) continue; properties=(*GetBlobProperties(images)); if (format != (char *) NULL) (void) CopyMagickString(images->filename,images->magick_filename, MagickPathExtent); if (path != (char *) NULL) { GetPathComponent(option,TailPath,filename); (void) FormatLocaleString(images->filename,MagickPathExtent, "%s%c%s",path,*DirectorySeparator,filename); } if (format != (char *) NULL) AppendImageFormat(format,images->filename); AppendImageStack(images); FinalizeImageSettings(image_info,image,MagickFalse); if (global_colormap != MagickFalse) { QuantizeInfo *quantize_info; quantize_info=AcquireQuantizeInfo(image_info); (void) RemapImages(quantize_info,images,(Image *) NULL,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } *backup_filename='\0'; if ((LocaleCompare(image->filename,"-") != 0) && (IsPathWritable(image->filename) != MagickFalse)) { /* Rename image file as backup. */ (void) CopyMagickString(backup_filename,image->filename, MagickPathExtent); for (j=0; j < 6; j++) { (void) ConcatenateMagickString(backup_filename,"~", MagickPathExtent); if (IsPathAccessible(backup_filename) == MagickFalse) break; } if ((IsPathAccessible(backup_filename) != MagickFalse) || (rename_utf8(image->filename,backup_filename) != 0)) *backup_filename='\0'; } /* Write transmogrified image to disk. */ image_info->synchronize=MagickTrue; status&=WriteImages(image_info,image,image->filename,exception); if (status != MagickFalse) { #if defined(MAGICKCORE_HAVE_UTIME) { MagickBooleanType preserve_timestamp; preserve_timestamp=IsStringTrue(GetImageOption(image_info, "preserve-timestamp")); if (preserve_timestamp != MagickFalse) { struct utimbuf timestamp; timestamp.actime=properties.st_atime; timestamp.modtime=properties.st_mtime; (void) utime(image->filename,&timestamp); } } #endif if (*backup_filename != '\0') (void) remove_utf8(backup_filename); } RemoveAllImageStack(); continue; } pend=image != (Image *) NULL ? 
MagickTrue : MagickFalse; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("affine",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("alpha",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse, argv[i]); if (type < 0) ThrowMogrifyException(OptionError, "UnrecognizedAlphaChannelOption",argv[i]); break; } if (LocaleCompare("annotate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); i++; break; } if (LocaleCompare("antialias",option+1) == 0) break; if (LocaleCompare("append",option+1) == 0) break; if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("auto-gamma",option+1) == 0) break; if (LocaleCompare("auto-level",option+1) == 0) break; if (LocaleCompare("auto-orient",option+1) == 0) break; if (LocaleCompare("auto-threshold",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickAutoThresholdOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedThresholdMethod", argv[i]); break; } if (LocaleCompare("average",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) break; if (LocaleCompare("black-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-shift",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("border",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'c': { if (LocaleCompare("cache",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("canny",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("channel",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParseChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("channel-fx",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParsePixelChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("cdl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("charcoal",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("chop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clahe",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clamp",option+1) == 0) break; if (LocaleCompare("clip",option+1) == 0) break; if (LocaleCompare("clip-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("clut",option+1) == 0) break; if (LocaleCompare("coalesce",option+1) == 0) break; if (LocaleCompare("colorize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("colors",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("colorspace",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("combine",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("compare",option+1) == 0) break; if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("composite",option+1) == 0) break; if (LocaleCompare("compress",option+1) == 0) { ssize_t compress; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); compress=ParseCommandOption(MagickCompressOptions,MagickFalse, argv[i]); if (compress < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageCompression", argv[i]); break; } if (LocaleCompare("concurrent",option+1) == 0) break; if (LocaleCompare("connected-components",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("contrast",option+1) == 0) break; if (LocaleCompare("contrast-stretch",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("convolve",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("copy",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("crop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("cycle",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("deconstruct",option+1) == 0) break; if (LocaleCompare("debug",option+1) == 0) { ssize_t event; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]); if (event < 0) ThrowMogrifyException(OptionError,"UnrecognizedEventType", argv[i]); (void) SetLogEventMask(argv[i]); break; } if (LocaleCompare("define",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { const char *define; define=GetImageOption(image_info,argv[i]); if (define == (const char *) NULL) ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]); break; } break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("density",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if 
(LocaleCompare("depth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("deskew",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("despeckle",option+1) == 0) break; if (LocaleCompare("dft",option+1) == 0) break; if (LocaleCompare("direction",option+1) == 0) { ssize_t direction; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, argv[i]); if (direction < 0) ThrowMogrifyException(OptionError,"UnrecognizedDirectionType", argv[i]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dispose",option+1) == 0) { ssize_t dispose; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, argv[i]); if (dispose < 0) ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod", argv[i]); break; } if (LocaleCompare("distort",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dither",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod", argv[i]); break; } if (LocaleCompare("draw",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("duration",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'e': { if (LocaleCompare("edge",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("emboss",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("encipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("endian",option+1) == 0) { ssize_t endian; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]); if (endian < 0) ThrowMogrifyException(OptionError,"UnrecognizedEndianType", argv[i]); break; } if (LocaleCompare("enhance",option+1) == 0) break; if (LocaleCompare("equalize",option+1) == 0) break; if (LocaleCompare("evaluate",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("evaluate-sequence",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); break; } if (LocaleCompare("extent",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("extract",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("filter",option+1) == 0) { ssize_t filter; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]); if (filter < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageFilter", argv[i]); break; } if (LocaleCompare("flatten",option+1) == 0) break; if (LocaleCompare("flip",option+1) == 0) break; if (LocaleCompare("flop",option+1) == 0) break; if (LocaleCompare("floodfill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("format",option+1) == 0) { (void) CopyMagickString(argv[i]+1,"sans",MagickPathExtent); (void) CloneString(&format,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&format,argv[i]); (void) CopyMagickString(image_info->filename,format, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,":", MagickPathExtent); (void) SetImageInfo(image_info,0,exception); if (*image_info->magick == '\0') ThrowMogrifyException(OptionError,"UnrecognizedImageFormat", format); break; } if (LocaleCompare("frame",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("function",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fx",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("geometry",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("gravity",option+1) == 0) { ssize_t gravity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse, argv[i]); if (gravity < 0) ThrowMogrifyException(OptionError,"UnrecognizedGravityType", argv[i]); break; } if (LocaleCompare("grayscale",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod", argv[i]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) break; if ((LocaleCompare("help",option+1) == 0) || (LocaleCompare("-help",option+1) == 0)) return(MogrifyUsage()); if (LocaleCompare("hough-lines",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'i': { if (LocaleCompare("identify",option+1) == 0) break; if (LocaleCompare("idft",option+1) == 0) break; if (LocaleCompare("implode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("intensity",option+1) == 0) { ssize_t intensity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,argv[i]); if (intensity < 0) ThrowMogrifyException(OptionError, "UnrecognizedPixelIntensityMethod",argv[i]); break; } if (LocaleCompare("intent",option+1) == 0) { ssize_t intent; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]); if (intent < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntentType", argv[i]); break; } if (LocaleCompare("interlace",option+1) == 0) { ssize_t interlace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse, argv[i]); if (interlace < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType", argv[i]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("interpolate",option+1) == 0) { ssize_t interpolate; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse, argv[i]); if (interpolate < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod", argv[i]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'k': { if 
(LocaleCompare("kerning",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("kuwahara",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'l': { if (LocaleCompare("label",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("lat",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("layers",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod", argv[i]); break; } if (LocaleCompare("level",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("level-colors",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("limit",option+1) == 0) { char *p; double value; ssize_t resource; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); resource=ParseCommandOption(MagickResourceOptions,MagickFalse, argv[i]); if (resource < 0) ThrowMogrifyException(OptionError,"UnrecognizedResourceType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); value=StringToDouble(argv[i],&p); (void) value; if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0)) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]); if (list < 0) ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]); status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **) argv+j,exception); return(status == 0 ? 
MagickFalse : MagickTrue); } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; i++; if ((i == (ssize_t) argc) || (strchr(argv[i],'%') == (char *) NULL)) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'm': { if (LocaleCompare("magnify",option+1) == 0) break; if (LocaleCompare("map",option+1) == 0) { global_colormap=(*option == '+') ? MagickTrue : MagickFalse; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("matte",option+1) == 0) break; if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("maximum",option+1) == 0) break; if (LocaleCompare("mean-shift",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("median",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("metric",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedMetricType", argv[i]); break; } if (LocaleCompare("minimum",option+1) == 0) break; if (LocaleCompare("modulate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("mode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("monitor",option+1) == 0) break; if (LocaleCompare("monochrome",option+1) == 0) break; if (LocaleCompare("morph",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; KernelInfo *kernel_info; ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); GetNextToken(argv[i],(const char **) NULL,MagickPathExtent,token); op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod", 
token); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("mosaic",option+1) == 0) break; if (LocaleCompare("motion-blur",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'n': { if (LocaleCompare("negate",option+1) == 0) break; if (LocaleCompare("noise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { ssize_t noise; noise=ParseCommandOption(MagickNoiseOptions,MagickFalse, argv[i]); if (noise < 0) ThrowMogrifyException(OptionError,"UnrecognizedNoiseType", argv[i]); break; } if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("noop",option+1) == 0) break; if (LocaleCompare("normalize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("orient",option+1) == 0) { ssize_t orientation; orientation=UndefinedOrientation; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse, argv[i]); if (orientation < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'p': { if (LocaleCompare("page",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("paint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("path",option+1) == 0) { (void) CloneString(&path,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&path,argv[i]); break; } if (LocaleCompare("perceptible",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("polaroid",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("poly",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("posterize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("precision",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("print",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("process",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("profile",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("quantize",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("quiet",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'r': { if (LocaleCompare("rotational-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("raise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("random-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("range-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("read-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("regard-warnings",option+1) == 0) break; if (LocaleCompare("region",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("remap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("render",option+1) == 0) break; if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleNCompare("respect-parentheses",option+1,17) == 0) { respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("reverse",option+1) == 0) break; if (LocaleCompare("roll",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("rotate",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 's': { if (LocaleCompare("sample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sampling-factor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scale",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scene",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("seed",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("segment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("selective-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("separate",option+1) == 0) break; if (LocaleCompare("sepia-tone",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("set",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("shade",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shadow",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shave",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shear",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sketch",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("smush",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; break; } if (LocaleCompare("solarize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sparse-color",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("splice",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("spread",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("statistic",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedStatisticType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("stretch",option+1) == 0) { ssize_t stretch; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse, argv[i]); if (stretch < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("strip",option+1) == 0) break; if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("style",option+1) == 0) { ssize_t style; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]); if (style < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("swap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("swirl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("synchronize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 't': { if (LocaleCompare("taint",option+1) == 0) break; if (LocaleCompare("texture",option+1) 
== 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("tint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transform",option+1) == 0) break; if (LocaleCompare("transpose",option+1) == 0) break; if (LocaleCompare("transverse",option+1) == 0) break; if (LocaleCompare("threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transparent",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("treedepth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("trim",option+1) == 0) break; if (LocaleCompare("type",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageType", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("unique-colors",option+1) == 0) break; if (LocaleCompare("units",option+1) == 0) { ssize_t units; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); units=ParseCommandOption(MagickResolutionOptions,MagickFalse, argv[i]); if (units < 0) ThrowMogrifyException(OptionError,"UnrecognizedUnitsType", argv[i]); break; } if (LocaleCompare("unsharp",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { 
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse; break; } if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); break; } if (LocaleCompare("vignette",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError, "UnrecognizedVirtualPixelMethod",argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'w': { if (LocaleCompare("wave",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("write",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("write-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case '?': break; default: ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) & FireOptionFlag) == 0 ? MagickFalse : MagickTrue; if (fire != MagickFalse) FireImageStack(MagickFalse,MagickTrue,MagickTrue); } if (k != 0) ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]); if (i != (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]); DestroyMogrify(); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageInfo() applies image processing settings to the image as % prescribed by command line options. 
% % The format of the MogrifyImageInfo method is: % % MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc, % const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info, const int argc,const char **argv,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; ssize_t count; register ssize_t i; /* Initialize method variables. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (argc < 0) return(MagickTrue); /* Set the image settings. */ for (i=0; i < (ssize_t) argc; i++) { option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("antialias",option+1) == 0) { image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorCompliance(MogrifyBackgroundColor, AllCompliance,&image_info->background_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->background_color,exception); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"convolve:bias","0.0"); break; } (void) SetImageOption(image_info,"convolve:bias",argv[i+1]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &image_info->border_color,exception); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->border_color,exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"undercolor","none"); break; } (void) SetImageOption(image_info,"undercolor",argv[i+1]); break; } break; } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType limit; limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+1]) != 0) limit=(MagickSizeType) 
SiPrefixToDoubleInterval(argv[i+1], 100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("colorspace",option+1) == 0) { if (*option == '+') { image_info->colorspace=UndefinedColorspace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compress",option+1) == 0) { if (*option == '+') { image_info->compression=UndefinedCompression; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'd': { if (LocaleCompare("debug",option+1) == 0) { if (*option == '+') (void) SetLogEventMask("none"); else (void) SetLogEventMask(argv[i+1]); image_info->debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else (void) DeleteImageOption(image_info,argv[i+1]); break; } if (LocaleNCompare(argv[i+1],"registry:",9) == 0) { (void) DefineImageRegistry(StringRegistryType,argv[i+1]+9, exception); break; } (void) DefineImageOption(image_info,argv[i+1]); break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. 
*/ if (*option == '+') { if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); (void) SetImageOption(image_info,option+1,"72"); break; } (void) CloneString(&image_info->density,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') { image_info->depth=MAGICKCORE_QUANTUM_DEPTH; break; } image_info->depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("direction",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') { if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); break; } (void) CloneString(&image_info->server_name,argv[i+1]); break; } if (LocaleCompare("dispose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { image_info->dither=MagickFalse; (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); image_info->dither=MagickTrue; break; } break; } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("endian",option+1) == 0) { if (*option == '+') { image_info->endian=UndefinedEndian; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->endian=(EndianType) ParseCommandOption( MagickEndianOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("extract",option+1) == 0) { /* Set image extract geometry. 
*/ if (*option == '+') { if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); break; } (void) CloneString(&image_info->extract,argv[i+1]); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option != '+') (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("filter",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); break; } (void) CloneString(&image_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { register const char *q; for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) image_info->ping=MagickFalse; (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') { image_info->fuzz=0.0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->fuzz=StringToDoubleInterval(argv[i+1],(double) QuantumRange+1.0); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("intent",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interlace",option+1) == 0) { if (*option == '+') { image_info->interlace=UndefinedInterlace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->interlace=(InterlaceType) ParseCommandOption( MagickInterlaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interpolate",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'l': { if 
(LocaleCompare("label",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; ResourceType type; if (*option == '+') break; type=(ResourceType) ParseCommandOption(MagickResourceOptions, MagickFalse,argv[i+1]); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+2]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0); (void) SetMagickResourceLimit(type,limit); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; /* Display configuration list. */ list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]); switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,exception); break; } case MagickFormatOptions: { (void) ListMagickInfo((FILE *) NULL,exception); break; } case MagickLocaleOptions: { (void) ListLocaleInfo((FILE *) NULL,exception); break; } case MagickLogOptions: { (void) ListLogInfo((FILE *) NULL,exception); break; } case MagickMagicOptions: { (void) ListMagicInfo((FILE *) NULL,exception); break; } case MagickMimeOptions: { (void) ListMimeInfo((FILE *) NULL,exception); break; } case MagickModuleOptions: { (void) ListModuleInfo((FILE *) NULL,exception); break; } case MagickPolicyOptions: { (void) ListPolicyInfo((FILE *) NULL,exception); break; } case MagickResourceOptions: { (void) ListMagickResourceInfo((FILE *) NULL,exception); break; } case MagickThresholdOptions: { (void) ListThresholdMaps((FILE *) NULL,exception); break; } default: { (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, exception); break; } } break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; (void) SetLogFormat(argv[i+1]); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("matte",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(MogrifyAlphaColor,AllCompliance, &image_info->matte_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->matte_color,exception); break; } if (LocaleCompare("metric",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(image_info,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { image_info->monochrome=(*option == '-') ? 
MagickTrue : MagickFalse; break; } break; } case 'o': { if (LocaleCompare("orient",option+1) == 0) { if (*option == '+') { image_info->orientation=UndefinedOrientation; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } } case 'p': { if (LocaleCompare("page",option+1) == 0) { char *canonical_page, page[MagickPathExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) CloneString(&image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(image_info,"page"); if (image_option != (const char *) NULL) flags=ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(argv[i+1]); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(image_info,option+1,page); (void) CloneString(&image_info->page,page); break; } if (LocaleCompare("ping",option+1) == 0) { image_info->ping=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') geometry_info.rho=0.0; else (void) ParseGeometry(argv[i+1],&geometry_info); image_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("precision",option+1) == 0) { (void) SetMagickPrecision(StringToInteger(argv[i+1])); break; } break; } case 'q': { if (LocaleCompare("quality",option+1) == 0) { /* Set image compression quality. */ if (*option == '+') { image_info->quality=UndefinedCompressionQuality; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->quality=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("quiet",option+1) == 0) { static WarningHandler warning_handler = (WarningHandler) NULL; if (*option == '+') { /* Restore error or warning messages. */ warning_handler=SetWarningHandler(warning_handler); break; } /* Suppress error or warning messages. */ warning_handler=SetWarningHandler((WarningHandler) NULL); break; } break; } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* Set image sampling factor. */ if (*option == '+') { if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); break; } (void) CloneString(&image_info->sampling_factor,argv[i+1]); break; } if (LocaleCompare("scene",option+1) == 0) { /* Set image scene. 
*/ if (*option == '+') { image_info->scene=0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->scene=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("seed",option+1) == 0) { unsigned long seed; if (*option == '+') { seed=(unsigned long) time((time_t *) NULL); SetRandomSecretKey(seed); break; } seed=StringToUnsignedLong(argv[i+1]); SetRandomSecretKey(seed); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') { if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); break; } (void) CloneString(&image_info->size,argv[i+1]); break; } if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("synchronize",option+1) == 0) { if (*option == '+') { image_info->synchronize=MagickFalse; break; } image_info->synchronize=MagickTrue; break; } break; } case 't': { if (LocaleCompare("taint",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') { if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); break; } (void) CloneString(&image_info->texture,argv[i+1]); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance, &image_info->transparent_color,exception); (void) SetImageOption(image_info,option+1,"none"); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->transparent_color,exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("type",option+1) == 0) { if (*option == '+') { image_info->type=UndefinedType; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions, MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("units",option+1) == 0) { if (*option == '+') { image_info->units=UndefinedResolution; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->units=(ResolutionType) ParseCommandOption( MagickResolutionOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { if (*option == '+') { image_info->verbose=MagickFalse; break; } image_info->verbose=MagickTrue; image_info->ping=MagickFalse; break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') 
(void) SetImageOption(image_info,option+1,"undefined"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'w': { if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0.0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } default: break; } i+=count; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageList() applies any command line options that might affect the % entire image list (e.g. -append, -coalesce, etc.). % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc, % const char **argv,Image **images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info, const int argc,const char **argv,Image **images,ExceptionInfo *exception) { const char *option; ImageInfo *mogrify_info; MagickStatusType status; PixelInterpolateMethod interpolate_method; QuantizeInfo *quantize_info; register ssize_t i; ssize_t count, index; /* Apply options to the image list. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image **) NULL); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); interpolate_method=UndefinedInterpolatePixel; mogrify_info=CloneImageInfo(image_info); quantize_info=AcquireQuantizeInfo(mogrify_info); status=MagickTrue; for (i=0; i < (ssize_t) argc; i++) { if (*images == (Image *) NULL) break; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); switch (*(option+1)) { case 'a': { if (LocaleCompare("affinity",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images,exception); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } i++; break; } if (LocaleCompare("append",option+1) == 0) { Image *append_image; (void) SyncImagesSettings(mogrify_info,*images,exception); append_image=AppendImages(*images,*option == '-' ? MagickTrue : MagickFalse,exception); if (append_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=append_image; break; } if (LocaleCompare("average",option+1) == 0) { Image *average_image; /* Average an image sequence (deprecated). 
*/ (void) SyncImagesSettings(mogrify_info,*images,exception); average_image=EvaluateImages(*images,MeanEvaluateOperator, exception); if (average_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=average_image; break; } break; } case 'c': { if (LocaleCompare("channel-fx",option+1) == 0) { Image *channel_image; (void) SyncImagesSettings(mogrify_info,*images,exception); channel_image=ChannelFxImage(*images,argv[i+1],exception); if (channel_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=channel_image; break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image, *image; (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); clut_image=RemoveFirstImageFromList(images); if (clut_image == (Image *) NULL) { status=MagickFalse; break; } (void) ClutImage(image,clut_image,interpolate_method,exception); clut_image=DestroyImage(clut_image); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("coalesce",option+1) == 0) { Image *coalesce_image; (void) SyncImagesSettings(mogrify_info,*images,exception); coalesce_image=CoalesceImages(*images,exception); if (coalesce_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=coalesce_image; break; } if (LocaleCompare("combine",option+1) == 0) { ColorspaceType colorspace; Image *combine_image; (void) SyncImagesSettings(mogrify_info,*images,exception); colorspace=(*images)->colorspace; if ((*images)->number_channels < GetImageListLength(*images)) colorspace=sRGBColorspace; if (*option == '+') colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); combine_image=CombineImages(*images,colorspace,exception); if (combine_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=combine_image; break; } if (LocaleCompare("compare",option+1) == 0) { double distortion; Image *difference_image, *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. 
*/ (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); reconstruct_image=RemoveFirstImageFromList(images); if (reconstruct_image == (Image *) NULL) { status=MagickFalse; break; } metric=UndefinedErrorMetric; option=GetImageOption(mogrify_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); difference_image=CompareImages(image,reconstruct_image,metric, &distortion,exception); if (difference_image == (Image *) NULL) break; reconstruct_image=DestroyImage(reconstruct_image); image=DestroyImage(image); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=difference_image; break; } if (LocaleCompare("complex",option+1) == 0) { ComplexOperator op; Image *complex_images; (void) SyncImageSettings(mogrify_info,*images,exception); op=(ComplexOperator) ParseCommandOption(MagickComplexOptions, MagickFalse,argv[i+1]); complex_images=ComplexImages(*images,op,exception); if (complex_images == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=complex_images; break; } if (LocaleCompare("composite",option+1) == 0) { CompositeOperator compose; const char* value; MagickBooleanType clip_to_self; Image *mask_image, *new_images, *source_image; RectangleInfo geometry; /* Compose value from "-compose" option only */ (void) SyncImageSettings(mogrify_info,*images,exception); value=GetImageOption(mogrify_info,"compose"); if (value == (const char *) NULL) compose=OverCompositeOp; /* use Over not source_image->compose */ else compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,value); /* Get "clip-to-self" expert setting (false is normal) */ clip_to_self=GetCompositeClipToSelf(compose); value=GetImageOption(mogrify_info,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsStringTrue(value); value=GetImageOption(mogrify_info,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsStringFalse(value); /* deprecated */ new_images=RemoveFirstImageFromList(images); source_image=RemoveFirstImageFromList(images); if (source_image == (Image *) NULL) break; /* FUTURE - produce Exception, rather than silent fail */ /* FUTURE: this should not be here! 
- should be part of -geometry */ if (source_image->geometry != (char *) NULL) { RectangleInfo resize_geometry; (void) ParseRegionGeometry(source_image,source_image->geometry, &resize_geometry,exception); if ((source_image->columns != resize_geometry.width) || (source_image->rows != resize_geometry.height)) { Image *resize_image; resize_image=ResizeImage(source_image,resize_geometry.width, resize_geometry.height,source_image->filter,exception); if (resize_image != (Image *) NULL) { source_image=DestroyImage(source_image); source_image=resize_image; } } } SetGeometry(source_image,&geometry); (void) ParseAbsoluteGeometry(source_image->geometry,&geometry); GravityAdjustGeometry(new_images->columns,new_images->rows, new_images->gravity,&geometry); mask_image=RemoveFirstImageFromList(images); if (mask_image == (Image *) NULL) status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); else { if ((compose == DisplaceCompositeOp) || (compose == DistortCompositeOp)) { status&=CompositeImage(source_image,mask_image, CopyGreenCompositeOp,MagickTrue,0,0,exception); status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); } else { Image *clone_image; clone_image=CloneImage(new_images,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) break; status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); status&=CompositeImage(new_images,mask_image, CopyAlphaCompositeOp,MagickTrue,0,0,exception); status&=CompositeImage(clone_image,new_images, OverCompositeOp,clip_to_self,0,0,exception); new_images=DestroyImageList(new_images); new_images=clone_image; } mask_image=DestroyImage(mask_image); } source_image=DestroyImage(source_image); *images=DestroyImageList(*images); *images=new_images; break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
*/ (void) SyncImageSettings(mogrify_info,*images,exception); (void) ParsePageGeometry(*images,argv[i+2],&geometry,exception); offset.x=geometry.x; offset.y=geometry.y; source_image=(*images); if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,argv[i+1],&geometry, exception); status=CopyImagePixels(*images,source_image,&geometry,&offset, exception); break; } break; } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { Image *deconstruct_image; (void) SyncImagesSettings(mogrify_info,*images,exception); deconstruct_image=CompareImagesLayers(*images,CompareAnyLayer, exception); if (deconstruct_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=deconstruct_image; break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') DeleteImages(images,"-1",exception); else DeleteImages(images,argv[i+1],exception); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither_method=NoDitherMethod; break; } quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("duplicate",option+1) == 0) { Image *duplicate_images; if (*option == '+') duplicate_images=DuplicateImages(*images,1,"-1",exception); else { const char *p; size_t number_duplicates; number_duplicates=(size_t) StringToLong(argv[i+1]); p=strchr(argv[i+1],','); if (p == (const char *) NULL) duplicate_images=DuplicateImages(*images,number_duplicates, "-1",exception); else duplicate_images=DuplicateImages(*images,number_duplicates,p, exception); } AppendImageToList(images, duplicate_images); (void) SyncImagesSettings(mogrify_info,*images,exception); break; } break; } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { Image *evaluate_image; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*images,exception); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); evaluate_image=EvaluateImages(*images,op,exception); if (evaluate_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=evaluate_image; break; } break; } case 'f': { if (LocaleCompare("fft",option+1) == 0) { Image *fourier_image; /* Implements the discrete Fourier transform (DFT). */ (void) SyncImageSettings(mogrify_info,*images,exception); fourier_image=ForwardFourierTransformImage(*images,*option == '-' ? 
MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("flatten",option+1) == 0) { Image *flatten_image; (void) SyncImagesSettings(mogrify_info,*images,exception); flatten_image=MergeImageLayers(*images,FlattenLayer,exception); if (flatten_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=flatten_image; break; } if (LocaleCompare("fx",option+1) == 0) { Image *fx_image; (void) SyncImagesSettings(mogrify_info,*images,exception); fx_image=FxImage(*images,argv[i+1],exception); if (fx_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=fx_image; break; } break; } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { Image *hald_image, *image; (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); hald_image=RemoveFirstImageFromList(images); if (hald_image == (Image *) NULL) { status=MagickFalse; break; } (void) HaldClutImage(image,hald_image,exception); hald_image=DestroyImage(hald_image); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=image; break; } break; } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *fourier_image, *magnitude_image, *phase_image; /* Implements the inverse fourier discrete Fourier transform (DFT). */ (void) SyncImagesSettings(mogrify_info,*images,exception); magnitude_image=RemoveFirstImageFromList(images); phase_image=RemoveFirstImageFromList(images); if (phase_image == (Image *) NULL) { status=MagickFalse; break; } fourier_image=InverseFourierTransformImage(magnitude_image, phase_image,*option == '-' ? MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("insert",option+1) == 0) { Image *p, *q; index=0; if (*option != '+') index=(ssize_t) StringToLong(argv[i+1]); p=RemoveLastImageFromList(images); if (p == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } q=p; if (index == 0) PrependImageToList(images,q); else if (index == (ssize_t) GetImageListLength(*images)) AppendImageToList(images,q); else { q=GetImageFromList(*images,index-1); if (q == (Image *) NULL) { p=DestroyImage(p); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } InsertImageInList(&q,p); } *images=GetFirstImageInList(q); break; } if (LocaleCompare("interpolate",option+1) == 0) { interpolate_method=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,argv[i+1]); break; } break; } case 'l': { if (LocaleCompare("layers",option+1) == 0) { Image *layers; LayerMethod method; (void) SyncImagesSettings(mogrify_info,*images,exception); layers=(Image *) NULL; method=(LayerMethod) ParseCommandOption(MagickLayerOptions, MagickFalse,argv[i+1]); switch (method) { case CoalesceLayer: { layers=CoalesceImages(*images,exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { layers=CompareImagesLayers(*images,method,exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { layers=MergeImageLayers(*images,method,exception); break; } case DisposeLayer: { layers=DisposeImages(*images,exception); break; } case 
OptimizeImageLayer: { layers=OptimizeImageLayers(*images,exception); break; } case OptimizePlusLayer: { layers=OptimizePlusImageLayers(*images,exception); break; } case OptimizeTransLayer: { OptimizeImageTransparency(*images,exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(images,exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(images,exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. */ layers=CoalesceImages(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=layers; layers=OptimizeImageLayers(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=layers; layers=(Image *) NULL; OptimizeImageTransparency(*images,exception); (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } case CompositeLayer: { CompositeOperator compose; Image *source; RectangleInfo geometry; /* Split image sequence at the first 'NULL:' image. */ source=(*images); while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. */ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); status=MagickFalse; break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(*images,&geometry); (void) ParseAbsoluteGeometry((*images)->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry((*images)->page.width != 0 ? (*images)->page.width : (*images)->columns, (*images)->page.height != 0 ? (*images)->page.height : (*images)->rows,(*images)->gravity,&geometry); compose=OverCompositeOp; option=GetImageOption(mogrify_info,"compose"); if (option != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,option); CompositeLayers(*images,compose,source,geometry.x,geometry.y, exception); source=DestroyImageList(source); break; } } if (layers == (Image *) NULL) break; *images=DestroyImageList(*images); *images=layers; break; } break; } case 'm': { if (LocaleCompare("map",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images,exception); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } i++; break; } if (LocaleCompare("maximum",option+1) == 0) { Image *maximum_image; /* Maximum image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images,exception); maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception); if (maximum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=maximum_image; break; } if (LocaleCompare("minimum",option+1) == 0) { Image *minimum_image; /* Minimum image sequence (deprecated). 
*/ (void) SyncImagesSettings(mogrify_info,*images,exception); minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception); if (minimum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=minimum_image; break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; (void) SyncImagesSettings(mogrify_info,*images,exception); morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]), exception); if (morph_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { Image *mosaic_image; (void) SyncImagesSettings(mogrify_info,*images,exception); mosaic_image=MergeImageLayers(*images,MosaicLayer,exception); if (mosaic_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=mosaic_image; break; } break; } case 'p': { if (LocaleCompare("poly",option+1) == 0) { char *args, token[MagickPathExtent]; const char *p; double *arguments; Image *polynomial_image; register ssize_t x; size_t number_arguments; /* Polynomial image. */ (void) SyncImageSettings(mogrify_info,*images,exception); args=InterpretImageProperties(mogrify_info,*images,argv[i+1], exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*images)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); polynomial_image=PolynomialImage(*images,number_arguments >> 1, arguments,exception); arguments=(double *) RelinquishMagickMemory(arguments); if (polynomial_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=polynomial_image; } if (LocaleCompare("print",option+1) == 0) { char *string; (void) SyncImagesSettings(mogrify_info,*images,exception); string=InterpretImageProperties(mogrify_info,*images,argv[i+1], exception); if (string == (char *) NULL) break; (void) FormatLocaleFile(stdout,"%s",string); string=DestroyString(string); } if (LocaleCompare("process",option+1) == 0) { char **arguments; int j, number_arguments; (void) SyncImagesSettings(mogrify_info,*images,exception); arguments=StringToArgv(argv[i+1],&number_arguments); if (arguments == (char **) NULL) break; if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL)) { char breaker, quote, *token; const char *argument; int next, token_status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg". 
*/ length=strlen(argv[i+1]); token=(char *) NULL; if (~length >= (MagickPathExtent-1)) token=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; argument=argv[i+1]; token_info=AcquireTokenInfo(); token_status=Tokenizer(token_info,0,token,length,argument,"", "=","\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (token_status == 0) { const char *arg; arg=(&(argument[next])); (void) InvokeDynamicImageFilter(token,&(*images),1,&arg, exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&(*images), number_arguments-2,(const char **) arguments+2,exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } break; } case 'r': { if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(images); break; } break; } case 's': { if (LocaleCompare("smush",option+1) == 0) { Image *smush_image; ssize_t offset; (void) SyncImagesSettings(mogrify_info,*images,exception); offset=(ssize_t) StringToLong(argv[i+1]); smush_image=SmushImages(*images,*option == '-' ? MagickTrue : MagickFalse,offset,exception); if (smush_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=smush_image; break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *u, *v; ssize_t swap_index; index=(-1); swap_index=(-2); if (*option != '+') { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(argv[i+1],&geometry_info); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(*images,index); q=GetImageFromList(*images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",(*images)->filename); status=MagickFalse; break; } if (p == q) break; u=CloneImage(p,0,0,MagickTrue,exception); if (u == (Image *) NULL) break; v=CloneImage(q,0,0,MagickTrue,exception); if (v == (Image *) NULL) { u=DestroyImage(u); break; } ReplaceImageInList(&p,v); ReplaceImageInList(&q,u); *images=GetFirstImageInList(q); break; } break; } case 'w': { if (LocaleCompare("write",option+1) == 0) { char key[MagickPathExtent]; Image *write_images; ImageInfo *write_info; (void) SyncImagesSettings(mogrify_info,*images,exception); (void) FormatLocaleString(key,MagickPathExtent,"cache:%s", argv[i+1]); (void) DeleteImageRegistry(key); write_images=(*images); if (*option == '+') write_images=CloneImageList(*images,exception); write_info=CloneImageInfo(mogrify_info); status&=WriteImages(write_info,write_images,argv[i+1],exception); write_info=DestroyImageInfo(write_info); if (*option == '+') write_images=DestroyImageList(write_images); break; } break; } default: break; } i+=count; } quantize_info=DestroyQuantizeInfo(quantize_info); mogrify_info=DestroyImageInfo(mogrify_info); status&=MogrifyImageInfo(image_info,argc,argv,exception); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImages() applies image processing options to a sequence of images as % prescribed by command line options. 
%
%  The format of the MogrifyImages method is:
%
%      MagickBooleanType MogrifyImages(ImageInfo *image_info,
%        const MagickBooleanType post,const int argc,const char **argv,
%        Image **images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o post: If true, post process image list operators otherwise pre-process.
%
%    o argc: Specifies a pointer to an integer describing the number of
%      elements in the argument vector.
%
%    o argv: Specifies a pointer to a text array containing the command line
%      arguments.
%
%    o images: pointer to a pointer of the first image in image list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info,
  const MagickBooleanType post,const int argc,const char **argv,
  Image **images,ExceptionInfo *exception)
{
#define MogrifyImageTag  "Mogrify/Image"

  MagickStatusType
    status;

  MagickBooleanType
    proceed;

  size_t
    n;

  register ssize_t
    i;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (images == (Image **) NULL)
    return(MogrifyImage(image_info,argc,argv,images,exception));
  assert((*images)->previous == (Image *) NULL);
  assert((*images)->signature == MagickCoreSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  if ((argc <= 0) || (*argv == (char *) NULL))
    return(MagickTrue);
  (void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  status=MagickTrue;
#if 0
  (void) FormatLocaleFile(stderr,"mogrify start %s %d (%s)\n",argv[0],argc,
    post ? "post" : "pre");
#endif
  /*
    Pre-process multi-image sequence operators.
  */
  if (post == MagickFalse)
    status&=MogrifyImageList(image_info,argc,argv,images,exception);
  /*
    For each image, process simple single image operators.
  */
  i=0;
  n=GetImageListLength(*images);
  for ( ; ; )
  {
#if 0
    (void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long)
      GetImageIndexInList(*images),(long) GetImageListLength(*images));
#endif
    status&=MogrifyImage(image_info,argc,argv,images,exception);
    proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i,n);
    if (proceed == MagickFalse)
      break;
    if ((*images)->next == (Image *) NULL)
      break;
    *images=(*images)->next;
    i++;
  }
  assert(*images != (Image *) NULL);
#if 0
  (void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long)
    GetImageIndexInList(*images),(long) GetImageListLength(*images));
#endif
  /*
    Post-process multi-image sequence operators.
  */
  *images=GetFirstImageInList(*images);
  if (post != MagickFalse)
    status&=MogrifyImageList(image_info,argc,argv,images,exception);
  return(status != 0 ? MagickTrue : MagickFalse);
}
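/*
  Illustrative sketch (hypothetical, not part of the original mogrify.c): the
  doc comment above explains that MogrifyImages() runs the multi-image list
  operators either before or after the per-image pass depending on `post`.
  The fragment below shows one plausible way a caller might drive it.  It
  assumes an ImageMagick 7 style build where MogrifyImages() is reachable
  through the MagickWand headers; the file names and the "-resize 50%" option
  are made-up examples, and exact headers/signatures should be checked
  against the installed ImageMagick version.
*/
#include <MagickWand/MagickWand.h>

int main(void)
{
  const char
    *options[] = { "mogrify", "-resize", "50%" };

  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  MagickWandGenesis();
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  images=ReadImages(image_info,image_info->filename,exception);
  if (images != (Image *) NULL)
    {
      /*
        post=MagickFalse: multi-image (list) operators are pre-processed,
        then the simple per-image operators are applied to every image.
      */
      status=MogrifyImages(image_info,MagickFalse,3,options,&images,exception);
      if (status != MagickFalse)
        (void) WriteImages(image_info,images,"output.png",exception);
      images=DestroyImageList(images);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickWandTerminus();
  return(0);
}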
./CrossVul/dataset_final_sorted/CWE-399/c/bad_946_0
crossvul-cpp_data_good_2171_1
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * This module provides the abstraction for an SCTP association. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ryan Layer <rmlayer@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/in.h> #include <net/ipv6.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal functions. */ static void sctp_select_active_and_retran_path(struct sctp_association *asoc); static void sctp_assoc_bh_rcv(struct work_struct *work); static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* 1st Level Abstractions. */ /* Initialize a new association from provided memory. */ static struct sctp_association *sctp_association_init(struct sctp_association *asoc, const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, gfp_t gfp) { struct net *net = sock_net(sk); struct sctp_sock *sp; int i; sctp_paramhdr_t *p; int err; /* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); /* Discarding const is appropriate here. */ asoc->ep = (struct sctp_endpoint *)ep; asoc->base.sk = (struct sock *)sk; sctp_endpoint_hold(asoc->ep); sock_hold(asoc->base.sk); /* Initialize the common base substructure. */ asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; /* Initialize the object handling fields. */ atomic_set(&asoc->base.refcnt, 1); /* Initialize the bind addr area. */ sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); asoc->state = SCTP_STATE_CLOSED; asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life); asoc->user_frag = sp->user_frag; /* Set the association max_retrans and RTO values from the * socket values. 
*/ asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; asoc->pf_retrans = net->sctp.pf_retrans; asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); /* Initialize the association's heartbeat interval based on the * sock configured value. */ asoc->hbinterval = msecs_to_jiffies(sp->hbinterval); /* Initialize path max retrans value. */ asoc->pathmaxrxt = sp->pathmaxrxt; /* Initialize default path MTU. */ asoc->pathmtu = sp->pathmtu; /* Set association default SACK delay */ asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); asoc->sackfreq = sp->sackfreq; /* Set the association default flags controlling * Heartbeat, SACK delay, and Path MTU Discovery. */ asoc->param_flags = sp->param_flags; /* Initialize the maximum number of new data packets that can be sent * in a burst. */ asoc->max_burst = sp->max_burst; /* initialize association timers */ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial; /* sctpimpguide Section 2.12.2 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the * recommended value of 5 times 'RTO.Max'. */ asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] = 5 * asoc->rto_max; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) setup_timer(&asoc->timers[i], sctp_timer_events[i], (unsigned long)asoc); /* Pull default initialization values from the sock options. * Note: This assumes that the values have already been * validated in the sock. */ asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams; asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams; asoc->max_init_attempts = sp->initmsg.sinit_max_attempts; asoc->max_init_timeo = msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo); /* Set the local window size for receive. * This is also the rcvbuf space per association. * RFC 6 - A SCTP receiver MUST be able to receive a minimum of * 1500 bytes in one SCTP packet. */ if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) asoc->rwnd = SCTP_DEFAULT_MINWINDOW; else asoc->rwnd = sk->sk_rcvbuf/2; asoc->a_rwnd = asoc->rwnd; /* Use my own max window until I learn something better. */ asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; /* Initialize the receive memory counter */ atomic_set(&asoc->rmem_alloc, 0); init_waitqueue_head(&asoc->wait); asoc->c.my_vtag = sctp_generate_tag(ep); asoc->c.my_port = ep->base.bind_addr.port; asoc->c.initial_tsn = sctp_generate_tsn(ep); asoc->next_tsn = asoc->c.initial_tsn; asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; asoc->highest_sacked = asoc->ctsn_ack_point; asoc->last_cwr_tsn = asoc->ctsn_ack_point; /* ADDIP Section 4.1 Asconf Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) a serial number should be assigned to the chunk. The serial * number SHOULD be a monotonically increasing number. The serial * numbers SHOULD be initialized at the start of the * association to the same value as the initial TSN. 
*/ asoc->addip_serial = asoc->c.initial_tsn; INIT_LIST_HEAD(&asoc->addip_chunk_list); INIT_LIST_HEAD(&asoc->asconf_ack_list); /* Make an empty list of remote transport addresses. */ INIT_LIST_HEAD(&asoc->peer.transport_addr_list); /* RFC 2960 5.1 Normal Establishment of an Association * * After the reception of the first data chunk in an * association the endpoint must immediately respond with a * sack to acknowledge the data chunk. Subsequent * acknowledgements should be done as described in Section * 6.2. * * [We implement this by telling a new association that it * already received one packet.] */ asoc->peer.sack_needed = 1; asoc->peer.sack_generation = 1; /* Assume that the peer will tell us if he recognizes ASCONF * as part of INIT exchange. * The sctp_addip_noauth option is there for backward compatibility * and will revert old behavior. */ if (net->sctp.addip_noauth) asoc->peer.asconf_capable = 1; /* Create an input queue. */ sctp_inq_init(&asoc->base.inqueue); sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); /* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; /* Assume that peer would support both address types unless we are * told otherwise. */ asoc->peer.ipv4_address = 1; if (asoc->base.sk->sk_family == PF_INET6) asoc->peer.ipv6_address = 1; INIT_LIST_HEAD(&asoc->asocs); asoc->default_stream = sp->default_stream; asoc->default_ppid = sp->default_ppid; asoc->default_flags = sp->default_flags; asoc->default_context = sp->default_context; asoc->default_timetolive = sp->default_timetolive; asoc->default_rcv_context = sp->default_rcv_context; /* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); if (err) goto fail_init; asoc->active_key_id = ep->active_key_id; /* Save the hmacs and chunks list into this association */ if (ep->auth_hmacs_list) memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, ntohs(ep->auth_hmacs_list->param_hdr.length)); if (ep->auth_chunk_list) memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, ntohs(ep->auth_chunk_list->param_hdr.length)); /* Get the AUTH random number for this association */ p = (sctp_paramhdr_t *)asoc->c.auth_random; p->type = SCTP_PARAM_RANDOM; p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH); get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH); return asoc; fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); return NULL; } /* Allocate and initialize a new association */ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, gfp_t gfp) { struct sctp_association *asoc; asoc = kzalloc(sizeof(*asoc), gfp); if (!asoc) goto fail; if (!sctp_association_init(asoc, ep, sk, scope, gfp)) goto fail_init; SCTP_DBG_OBJCNT_INC(assoc); pr_debug("Created asoc %p\n", asoc); return asoc; fail_init: kfree(asoc); fail: return NULL; } /* Free this association if possible. There may still be users, so * the actual deallocation may be delayed. */ void sctp_association_free(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct sctp_transport *transport; struct list_head *pos, *temp; int i; /* Only real associations count against the endpoint, so * don't bother for if this is a temporary association. */ if (!list_empty(&asoc->asocs)) { list_del(&asoc->asocs); /* Decrement the backlog value for a TCP-style listening * socket. 
*/ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sk->sk_ack_backlog--; } /* Mark as dead, so other users can know this structure is * going away. */ asoc->base.dead = true; /* Dispose of any data lying around in the outqueue. */ sctp_outq_free(&asoc->outqueue); /* Dispose of any pending messages for the upper layer. */ sctp_ulpq_free(&asoc->ulpq); /* Dispose of any pending chunks on the inqueue. */ sctp_inq_free(&asoc->base.inqueue); sctp_tsnmap_free(&asoc->peer.tsn_map); /* Free ssnmap storage. */ sctp_ssnmap_free(asoc->ssnmap); /* Clean up the bound address list. */ sctp_bind_addr_free(&asoc->base.bind_addr); /* Do we need to go through all of our timers and * delete them? To be safe we will try to delete all, but we * should be able to go through and make a guess based * on our state. */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { if (del_timer(&asoc->timers[i])) sctp_association_put(asoc); } /* Free peer's cached cookie. */ kfree(asoc->peer.cookie); kfree(asoc->peer.peer_random); kfree(asoc->peer.peer_chunks); kfree(asoc->peer.peer_hmacs); /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); list_del_rcu(pos); sctp_transport_free(transport); } asoc->peer.transport_count = 0; sctp_asconf_queue_teardown(asoc); /* Free pending address space being deleted */ if (asoc->asconf_addr_del_pending != NULL) kfree(asoc->asconf_addr_del_pending); /* AUTH - Free the endpoint shared keys */ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); /* AUTH - Free the association shared key */ sctp_auth_key_put(asoc->asoc_shared_key); sctp_association_put(asoc); } /* Cleanup and free up an association. */ static void sctp_association_destroy(struct sctp_association *asoc) { if (unlikely(!asoc->base.dead)) { WARN(1, "Attempt to destroy undead association %p!\n", asoc); return; } sctp_endpoint_put(asoc->ep); sock_put(asoc->base.sk); if (asoc->assoc_id != 0) { spin_lock_bh(&sctp_assocs_id_lock); idr_remove(&sctp_assocs_id, asoc->assoc_id); spin_unlock_bh(&sctp_assocs_id_lock); } WARN_ON(atomic_read(&asoc->rmem_alloc)); kfree(asoc); SCTP_DBG_OBJCNT_DEC(assoc); } /* Change the primary destination address for the peer. */ void sctp_assoc_set_primary(struct sctp_association *asoc, struct sctp_transport *transport) { int changeover = 0; /* it's a changeover only if we already have a primary path * that we are changing */ if (asoc->peer.primary_path != NULL && asoc->peer.primary_path != transport) changeover = 1 ; asoc->peer.primary_path = transport; /* Set a default msg_name for events. */ memcpy(&asoc->peer.primary_addr, &transport->ipaddr, sizeof(union sctp_addr)); /* If the primary path is changing, assume that the * user wants to use this new path. */ if ((transport->state == SCTP_ACTIVE) || (transport->state == SCTP_UNKNOWN)) asoc->peer.active_path = transport; /* * SFR-CACC algorithm: * Upon the receipt of a request to change the primary * destination address, on the data structure for the new * primary destination, the sender MUST do the following: * * 1) If CHANGEOVER_ACTIVE is set, then there was a switch * to this destination address earlier. The sender MUST set * CYCLING_CHANGEOVER to indicate that this switch is a * double switch to the same destination address. * * Really, only bother is we have data queued or outstanding on * the association. 
*/ if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen) return; if (transport->cacc.changeover_active) transport->cacc.cycling_changeover = changeover; /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that * a changeover has occurred. */ transport->cacc.changeover_active = changeover; /* 3) The sender MUST store the next TSN to be sent in * next_tsn_at_change. */ transport->cacc.next_tsn_at_change = asoc->next_tsn; } /* Remove a transport from an association. */ void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer) { struct list_head *pos; struct sctp_transport *transport; pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, &peer->ipaddr.sa); /* If we are to remove the current retran_path, update it * to the next peer before removing this peer from the list. */ if (asoc->peer.retran_path == peer) sctp_assoc_update_retran_path(asoc); /* Remove this peer from the list. */ list_del_rcu(&peer->transports); /* Get the first transport of asoc. */ pos = asoc->peer.transport_addr_list.next; transport = list_entry(pos, struct sctp_transport, transports); /* Update any entries that match the peer to be deleted. */ if (asoc->peer.primary_path == peer) sctp_assoc_set_primary(asoc, transport); if (asoc->peer.active_path == peer) asoc->peer.active_path = transport; if (asoc->peer.retran_path == peer) asoc->peer.retran_path = transport; if (asoc->peer.last_data_from == peer) asoc->peer.last_data_from = transport; /* If we remove the transport an INIT was last sent to, set it to * NULL. Combined with the update of the retran path above, this * will cause the next INIT to be sent to the next available * transport, maintaining the cycle. */ if (asoc->init_last_sent_to == peer) asoc->init_last_sent_to = NULL; /* If we remove the transport an SHUTDOWN was last sent to, set it * to NULL. Combined with the update of the retran path above, this * will cause the next SHUTDOWN to be sent to the next available * transport, maintaining the cycle. */ if (asoc->shutdown_last_sent_to == peer) asoc->shutdown_last_sent_to = NULL; /* If we remove the transport an ASCONF was last sent to, set it to * NULL. */ if (asoc->addip_last_asconf && asoc->addip_last_asconf->transport == peer) asoc->addip_last_asconf->transport = NULL; /* If we have something on the transmitted list, we have to * save it off. The best place is the active path. */ if (!list_empty(&peer->transmitted)) { struct sctp_transport *active = asoc->peer.active_path; struct sctp_chunk *ch; /* Reset the transport of each chunk on this list */ list_for_each_entry(ch, &peer->transmitted, transmitted_list) { ch->transport = NULL; ch->rtt_in_progress = 0; } list_splice_tail_init(&peer->transmitted, &active->transmitted); /* Start a T3 timer here in case it wasn't running so * that these migrated packets have a chance to get * retransmitted. */ if (!timer_pending(&active->T3_rtx_timer)) if (!mod_timer(&active->T3_rtx_timer, jiffies + active->rto)) sctp_transport_hold(active); } asoc->peer.transport_count--; sctp_transport_free(peer); } /* Add a transport address to an association. */ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, const gfp_t gfp, const int peer_state) { struct net *net = sock_net(asoc->base.sk); struct sctp_transport *peer; struct sctp_sock *sp; unsigned short port; sp = sctp_sk(asoc->base.sk); /* AF_INET and AF_INET6 share common port field. 
*/ port = ntohs(addr->v4.sin_port); pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__, asoc, &addr->sa, peer_state); /* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) asoc->peer.port = port; /* Check to see if this is a duplicate. */ peer = sctp_assoc_lookup_paddr(asoc, addr); if (peer) { /* An UNKNOWN state is only set on transports added by * user in sctp_connectx() call. Such transports should be * considered CONFIRMED per RFC 4960, Section 5.4. */ if (peer->state == SCTP_UNKNOWN) { peer->state = SCTP_ACTIVE; } return peer; } peer = sctp_transport_new(net, addr, gfp); if (!peer) return NULL; sctp_transport_set_owner(peer, asoc); /* Initialize the peer's heartbeat interval based on the * association configured value. */ peer->hbinterval = asoc->hbinterval; /* Set the path max_retrans. */ peer->pathmaxrxt = asoc->pathmaxrxt; /* And the partial failure retrans threshold */ peer->pf_retrans = asoc->pf_retrans; /* Initialize the peer's SACK delay timeout based on the * association configured value. */ peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; /* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ peer->param_flags = asoc->param_flags; sctp_transport_route(peer, NULL, sp); /* Initialize the pmtu of the transport. */ if (peer->param_flags & SPP_PMTUD_DISABLE) { if (asoc->pathmtu) peer->pathmtu = asoc->pathmtu; else peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } /* If this is the first transport addr on this association, * initialize the association PMTU to the peer's PMTU. * If not and the current association PMTU is higher than the new * peer's PMTU, reset the association PMTU to the new peer's PMTU. */ if (asoc->pathmtu) asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu); else asoc->pathmtu = peer->pathmtu; pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc, asoc->pathmtu); peer->pmtu_pending = 0; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); /* The asoc->peer.port might not be meaningful yet, but * initialize the packet structure anyway. */ sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, asoc->peer.port); /* 7.2.1 Slow-Start * * o The initial cwnd before DATA transmission or after a sufficiently * long idle period MUST be set to * min(4*MTU, max(2*MTU, 4380 bytes)) * * o The initial value of ssthresh MAY be arbitrarily high * (for example, implementations MAY use the size of the * receiver advertised window). */ peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); /* At this point, we may not have the receiver's advertised window, * so initialize ssthresh to the default value and it will be set * later when we process the INIT. */ peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; peer->partial_bytes_acked = 0; peer->flight_size = 0; peer->burst_limited = 0; /* Set the transport's RTO.initial value */ peer->rto = asoc->rto_initial; sctp_max_rto(asoc, peer); /* Set the peer's active state. */ peer->state = peer_state; /* Attach the remote transport to our asoc. */ list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list); asoc->peer.transport_count++; /* If we do not yet have a primary path, set one. */ if (!asoc->peer.primary_path) { sctp_assoc_set_primary(asoc, peer); asoc->peer.retran_path = peer; } if (asoc->peer.active_path == asoc->peer.retran_path && peer->state != SCTP_UNCONFIRMED) { asoc->peer.retran_path = peer; } return peer; } /* Delete a transport address from an association. 
*/ void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr) { struct list_head *pos; struct list_head *temp; struct sctp_transport *transport; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { /* Do book keeping for removing the peer and free it. */ sctp_assoc_rm_peer(asoc, transport); break; } } } /* Lookup a transport by address. */ struct sctp_transport *sctp_assoc_lookup_paddr( const struct sctp_association *asoc, const union sctp_addr *address) { struct sctp_transport *t; /* Cycle through all transports searching for a peer address. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (sctp_cmp_addr_exact(address, &t->ipaddr)) return t; } return NULL; } /* Remove all transports except a give one */ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, struct sctp_transport *primary) { struct sctp_transport *temp; struct sctp_transport *t; list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list, transports) { /* if the current transport is not the primary one, delete it */ if (t != primary) sctp_assoc_rm_peer(asoc, t); } } /* Engage in transport control operations. * Mark the transport up or down and send a notification to the user. * Select and update the new active and retran paths. */ void sctp_assoc_control_transport(struct sctp_association *asoc, struct sctp_transport *transport, sctp_transport_cmd_t command, sctp_sn_error_t error) { struct sctp_ulpevent *event; struct sockaddr_storage addr; int spc_state = 0; bool ulp_notify = true; /* Record the transition on the transport. */ switch (command) { case SCTP_TRANSPORT_UP: /* If we are moving from UNCONFIRMED state due * to heartbeat success, report the SCTP_ADDR_CONFIRMED * state to the user, otherwise report SCTP_ADDR_AVAILABLE. */ if (SCTP_UNCONFIRMED == transport->state && SCTP_HEARTBEAT_SUCCESS == error) spc_state = SCTP_ADDR_CONFIRMED; else spc_state = SCTP_ADDR_AVAILABLE; /* Don't inform ULP about transition from PF to * active state and set cwnd to 1 MTU, see SCTP * Quick failover draft section 5.1, point 5 */ if (transport->state == SCTP_PF) { ulp_notify = false; transport->cwnd = asoc->pathmtu; } transport->state = SCTP_ACTIVE; break; case SCTP_TRANSPORT_DOWN: /* If the transport was never confirmed, do not transition it * to inactive state. Also, release the cached route since * there may be a better route next time. */ if (transport->state != SCTP_UNCONFIRMED) transport->state = SCTP_INACTIVE; else { dst_release(transport->dst); transport->dst = NULL; ulp_notify = false; } spc_state = SCTP_ADDR_UNREACHABLE; break; case SCTP_TRANSPORT_PF: transport->state = SCTP_PF; ulp_notify = false; break; default: return; } /* Generate and send a SCTP_PEER_ADDR_CHANGE notification * to the user. */ if (ulp_notify) { memset(&addr, 0, sizeof(struct sockaddr_storage)); memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 0, spc_state, error, GFP_ATOMIC); if (event) sctp_ulpq_tail_event(&asoc->ulpq, event); } /* Select new active and retran paths. */ sctp_select_active_and_retran_path(asoc); } /* Hold a reference to an association. */ void sctp_association_hold(struct sctp_association *asoc) { atomic_inc(&asoc->base.refcnt); } /* Release a reference to an association and cleanup * if there are no more references. 
*/ void sctp_association_put(struct sctp_association *asoc) { if (atomic_dec_and_test(&asoc->base.refcnt)) sctp_association_destroy(asoc); } /* Allocate the next TSN, Transmission Sequence Number, for the given * association. */ __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) { /* From Section 1.6 Serial Number Arithmetic: * Transmission Sequence Numbers wrap around when they reach * 2**32 - 1. That is, the next TSN a DATA chunk MUST use * after transmitting TSN = 2*32 - 1 is TSN = 0. */ __u32 retval = asoc->next_tsn; asoc->next_tsn++; asoc->unack_data++; return retval; } /* Compare two addresses to see if they match. Wildcard addresses * only match themselves. */ int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2) { struct sctp_af *af; af = sctp_get_af_specific(ss1->sa.sa_family); if (unlikely(!af)) return 0; return af->cmp_addr(ss1, ss2); } /* Return an ecne chunk to get prepended to a packet. * Note: We are sly and return a shared, prealloced chunk. FIXME: * No we don't, but we could/should. */ struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) { if (!asoc->need_ecne) return NULL; /* Send ECNE if needed. * Not being able to allocate a chunk here is not deadly. */ return sctp_make_ecne(asoc, asoc->last_ecne_tsn); } /* * Find which transport this TSN was sent on. */ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, __u32 tsn) { struct sctp_transport *active; struct sctp_transport *match; struct sctp_transport *transport; struct sctp_chunk *chunk; __be32 key = htonl(tsn); match = NULL; /* * FIXME: In general, find a more efficient data structure for * searching. */ /* * The general strategy is to search each transport's transmitted * list. Return which transport this TSN lives on. * * Let's be hopeful and check the active_path first. * Another optimization would be to know if there is only one * outbound path and not have to look for the TSN at all. * */ active = asoc->peer.active_path; list_for_each_entry(chunk, &active->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { match = active; goto out; } } /* If not found, go search all the other transports. */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { if (transport == active) continue; list_for_each_entry(chunk, &transport->transmitted, transmitted_list) { if (key == chunk->subh.data_hdr->tsn) { match = transport; goto out; } } } out: return match; } /* Is this the association we are looking for? */ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr) { struct sctp_transport *transport; if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && (htons(asoc->peer.port) == paddr->v4.sin_port) && net_eq(sock_net(asoc->base.sk), net)) { transport = sctp_assoc_lookup_paddr(asoc, paddr); if (!transport) goto out; if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr, sctp_sk(asoc->base.sk))) goto out; } transport = NULL; out: return transport; } /* Do delayed input processing. This is scheduled by sctp_rcv(). */ static void sctp_assoc_bh_rcv(struct work_struct *work) { struct sctp_association *asoc = container_of(work, struct sctp_association, base.inqueue.immediate); struct net *net = sock_net(asoc->base.sk); struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sctp_inq *inqueue; int state; sctp_subtype_t subtype; int error = 0; /* The association should be held so we should be safe. 
*/ ep = asoc->ep; inqueue = &asoc->base.inqueue; sctp_association_hold(asoc); while (NULL != (chunk = sctp_inq_pop(inqueue))) { state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); /* SCTP-AUTH, Section 6.3: * The receiver has a list of chunk types which it expects * to be received only after an AUTH-chunk. This list has * been sent to the peer during the association setup. It * MUST silently discard these chunks if they are not placed * after an AUTH chunk in the packet. */ if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) continue; /* Remember where the last DATA chunk came from so we * know where to send the SACK. */ if (sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; else { SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS); asoc->stats.ictrlchunks++; if (chunk->chunk_hdr->type == SCTP_CID_SACK) asoc->stats.isacks++; } if (chunk->transport) chunk->transport->last_time_heard = ktime_get(); /* Run through the state machine. */ error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state, ep, asoc, chunk, GFP_ATOMIC); /* Check to see if the association is freed in response to * the incoming chunk. If so, get out of the while loop. */ if (asoc->base.dead) break; /* If there is an error on chunk, discard this packet. */ if (error && chunk) chunk->pdiscard = 1; } sctp_association_put(asoc); } /* This routine moves an association from its old sk to a new sk. */ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk) { struct sctp_sock *newsp = sctp_sk(newsk); struct sock *oldsk = assoc->base.sk; /* Delete the association from the old endpoint's list of * associations. */ list_del_init(&assoc->asocs); /* Decrement the backlog value for a TCP-style socket. */ if (sctp_style(oldsk, TCP)) oldsk->sk_ack_backlog--; /* Release references to the old endpoint and the sock. */ sctp_endpoint_put(assoc->ep); sock_put(assoc->base.sk); /* Get a reference to the new endpoint. */ assoc->ep = newsp->ep; sctp_endpoint_hold(assoc->ep); /* Get a reference to the new sock. */ assoc->base.sk = newsk; sock_hold(assoc->base.sk); /* Add the association to the new endpoint's list of associations. */ sctp_endpoint_add_asoc(newsp->ep, assoc); } /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ void sctp_assoc_update(struct sctp_association *asoc, struct sctp_association *new) { struct sctp_transport *trans; struct list_head *pos, *temp; /* Copy in new parameters of peer. */ asoc->c = new->c; asoc->peer.rwnd = new->peer.rwnd; asoc->peer.sack_needed = new->peer.sack_needed; asoc->peer.auth_capable = new->peer.auth_capable; asoc->peer.i = new->peer.i; sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, GFP_ATOMIC); /* Remove any peer addresses not present in the new association. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { trans = list_entry(pos, struct sctp_transport, transports); if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { sctp_assoc_rm_peer(asoc, trans); continue; } if (asoc->state >= SCTP_STATE_ESTABLISHED) sctp_transport_reset(trans); } /* If the case is A (association restart), use * initial_tsn as next_tsn. If the case is B, use * current next_tsn in case data sent to peer * has been discarded and needs retransmission. */ if (asoc->state >= SCTP_STATE_ESTABLISHED) { asoc->next_tsn = new->next_tsn; asoc->ctsn_ack_point = new->ctsn_ack_point; asoc->adv_peer_ack_point = new->adv_peer_ack_point; /* Reinitialize SSN for both local streams * and peer's streams. 
*/ sctp_ssnmap_clear(asoc->ssnmap); /* Flush the ULP reassembly and ordered queue. * Any data there will now be stale and will * cause problems. */ sctp_ulpq_flush(&asoc->ulpq); /* reset the overall association error count so * that the restarted association doesn't get torn * down on the next retransmission timer. */ asoc->overall_error_count = 0; } else { /* Add any peer addresses from the new association. */ list_for_each_entry(trans, &new->peer.transport_addr_list, transports) { if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) sctp_assoc_add_peer(asoc, &trans->ipaddr, GFP_ATOMIC, trans->state); } asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; if (!asoc->ssnmap) { /* Move the ssnmap. */ asoc->ssnmap = new->ssnmap; new->ssnmap = NULL; } if (!asoc->assoc_id) { /* get a new association id since we don't have one * yet. */ sctp_assoc_set_id(asoc, GFP_ATOMIC); } } /* SCTP-AUTH: Save the peer parameters from the new associations * and also move the association shared keys over */ kfree(asoc->peer.peer_random); asoc->peer.peer_random = new->peer.peer_random; new->peer.peer_random = NULL; kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = new->peer.peer_chunks; new->peer.peer_chunks = NULL; kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = new->peer.peer_hmacs; new->peer.peer_hmacs = NULL; sctp_auth_key_put(asoc->asoc_shared_key); sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); } /* Update the retran path for sending a retransmitted packet. * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints: * * When there is outbound data to send and the primary path * becomes inactive (e.g., due to failures), or where the * SCTP user explicitly requests to send data to an * inactive destination transport address, before reporting * an error to its ULP, the SCTP endpoint should try to send * the data to an alternate active destination transport * address if one exists. * * When retransmitting data that timed out, if the endpoint * is multihomed, it should consider each source-destination * address pair in its retransmission selection policy. * When retransmitting timed-out data, the endpoint should * attempt to pick the most divergent source-destination * pair from the original source-destination pair to which * the packet was transmitted. * * Note: Rules for picking the most divergent source-destination * pair are an implementation decision and are not specified * within this document. * * Our basic strategy is to round-robin transports in priorities * according to sctp_state_prio_map[] e.g., if no such * transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. 
*/ static const u8 sctp_trans_state_to_prio_map[] = { [SCTP_ACTIVE] = 3, /* best case */ [SCTP_UNKNOWN] = 2, [SCTP_PF] = 1, [SCTP_INACTIVE] = 0, /* worst case */ }; static u8 sctp_trans_score(const struct sctp_transport *trans) { return sctp_trans_state_to_prio_map[trans->state]; } static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, struct sctp_transport *trans2) { if (trans1->error_count > trans2->error_count) { return trans2; } else if (trans1->error_count == trans2->error_count && ktime_after(trans2->last_time_heard, trans1->last_time_heard)) { return trans2; } else { return trans1; } } static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, struct sctp_transport *best) { u8 score_curr, score_best; if (best == NULL || curr == best) return curr; score_curr = sctp_trans_score(curr); score_best = sctp_trans_score(best); /* First, try a score-based selection if both transport states * differ. If we're in a tie, lets try to make a more clever * decision here based on error counts and last time heard. */ if (score_curr > score_best) return curr; else if (score_curr == score_best) return sctp_trans_elect_tie(curr, best); else return best; } void sctp_assoc_update_retran_path(struct sctp_association *asoc) { struct sctp_transport *trans = asoc->peer.retran_path; struct sctp_transport *trans_next = NULL; /* We're done as we only have the one and only path. */ if (asoc->peer.transport_count == 1) return; /* If active_path and retran_path are the same and active, * then this is the only active path. Use it. */ if (asoc->peer.active_path == asoc->peer.retran_path && asoc->peer.active_path->state == SCTP_ACTIVE) return; /* Iterate from retran_path's successor back to retran_path. */ for (trans = list_next_entry(trans, transports); 1; trans = list_next_entry(trans, transports)) { /* Manually skip the head element. */ if (&trans->transports == &asoc->peer.transport_addr_list) continue; if (trans->state == SCTP_UNCONFIRMED) continue; trans_next = sctp_trans_elect_best(trans, trans_next); /* Active is good enough for immediate return. */ if (trans_next->state == SCTP_ACTIVE) break; /* We've reached the end, time to update path. */ if (trans == asoc->peer.retran_path) break; } asoc->peer.retran_path = trans_next; pr_debug("%s: association:%p updated new path to addr:%pISpc\n", __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); } static void sctp_select_active_and_retran_path(struct sctp_association *asoc) { struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL; struct sctp_transport *trans_pf = NULL; /* Look for the two most recently used active transports. */ list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Skip uninteresting transports. */ if (trans->state == SCTP_INACTIVE || trans->state == SCTP_UNCONFIRMED) continue; /* Keep track of the best PF transport from our * list in case we don't find an active one. */ if (trans->state == SCTP_PF) { trans_pf = sctp_trans_elect_best(trans, trans_pf); continue; } /* For active transports, pick the most recent ones. 
*/ if (trans_pri == NULL || ktime_after(trans->last_time_heard, trans_pri->last_time_heard)) { trans_sec = trans_pri; trans_pri = trans; } else if (trans_sec == NULL || ktime_after(trans->last_time_heard, trans_sec->last_time_heard)) { trans_sec = trans; } } /* RFC 2960 6.4 Multi-Homed SCTP Endpoints * * By default, an endpoint should always transmit to the primary * path, unless the SCTP user explicitly specifies the * destination transport address (and possibly source transport * address) to use. [If the primary is active but not most recent, * bump the most recently used transport.] */ if ((asoc->peer.primary_path->state == SCTP_ACTIVE || asoc->peer.primary_path->state == SCTP_UNKNOWN) && asoc->peer.primary_path != trans_pri) { trans_sec = trans_pri; trans_pri = asoc->peer.primary_path; } /* We did not find anything useful for a possible retransmission * path; either primary path that we found is the the same as * the current one, or we didn't generally find an active one. */ if (trans_sec == NULL) trans_sec = trans_pri; /* If we failed to find a usable transport, just camp on the * active or pick a PF iff it's the better choice. */ if (trans_pri == NULL) { trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf); trans_sec = trans_pri; } /* Set the active and retran transports. */ asoc->peer.active_path = trans_pri; asoc->peer.retran_path = trans_sec; } struct sctp_transport * sctp_assoc_choose_alter_transport(struct sctp_association *asoc, struct sctp_transport *last_sent_to) { /* If this is the first time packet is sent, use the active path, * else use the retran path. If the last packet was sent over the * retran path, update the retran path and use it. */ if (last_sent_to == NULL) { return asoc->peer.active_path; } else { if (last_sent_to == asoc->peer.retran_path) sctp_assoc_update_retran_path(asoc); return asoc->peer.retran_path; } } /* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. */ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) { struct sctp_transport *t; __u32 pmtu = 0; if (!asoc) return; /* Get the lowest pmtu of all the transports. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (t->pmtu_pending && t->dst) { sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst)); t->pmtu_pending = 0; } if (!pmtu || (t->pathmtu < pmtu)) pmtu = t->pathmtu; } if (pmtu) { asoc->pathmtu = pmtu; asoc->frag_point = sctp_frag_point(asoc, pmtu); } pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc, asoc->pathmtu, asoc->frag_point); } /* Should we send a SACK to update our peer? */ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) { struct net *net = sock_net(asoc->base.sk); switch (asoc->state) { case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: case SCTP_STATE_SHUTDOWN_SENT: if ((asoc->rwnd > asoc->a_rwnd) && ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32, (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift), asoc->pathmtu))) return true; break; default: break; } return false; } /* Increase asoc's rwnd by len and send any window update SACK if needed. 
*/ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) { struct sctp_chunk *sack; struct timer_list *timer; if (asoc->rwnd_over) { if (asoc->rwnd_over >= len) { asoc->rwnd_over -= len; } else { asoc->rwnd += (len - asoc->rwnd_over); asoc->rwnd_over = 0; } } else { asoc->rwnd += len; } /* If we had window pressure, start recovering it * once our rwnd had reached the accumulated pressure * threshold. The idea is to recover slowly, but up * to the initial advertised window. */ if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { int change = min(asoc->pathmtu, asoc->rwnd_press); asoc->rwnd += change; asoc->rwnd_press -= change; } pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd); /* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. * The algorithm used is similar to the one described in * Section 4.2.3.3 of RFC 1122. */ if (sctp_peer_needs_update(asoc)) { asoc->a_rwnd = asoc->rwnd; pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " "a_rwnd:%u\n", __func__, asoc, asoc->rwnd, asoc->a_rwnd); sack = sctp_make_sack(asoc); if (!sack) return; asoc->peer.sack_needed = 0; sctp_outq_tail(&asoc->outqueue, sack); /* Stop the SACK timer. */ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; if (del_timer(timer)) sctp_association_put(asoc); } } /* Decrease asoc's rwnd by len. */ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) { int rx_count; int over = 0; if (unlikely(!asoc->rwnd || asoc->rwnd_over)) pr_debug("%s: association:%p has asoc->rwnd:%u, " "asoc->rwnd_over:%u!\n", __func__, asoc, asoc->rwnd, asoc->rwnd_over); if (asoc->ep->rcvbuf_policy) rx_count = atomic_read(&asoc->rmem_alloc); else rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); /* If we've reached or overflowed our receive buffer, announce * a 0 rwnd if rwnd would still be positive. Store the * the potential pressure overflow so that the window can be restored * back to original value. */ if (rx_count >= asoc->base.sk->sk_rcvbuf) over = 1; if (asoc->rwnd >= len) { asoc->rwnd -= len; if (over) { asoc->rwnd_press += asoc->rwnd; asoc->rwnd = 0; } } else { asoc->rwnd_over = len - asoc->rwnd; asoc->rwnd = 0; } pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, asoc->rwnd_press); } /* Build the bind address list for the association based on info from the * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, sctp_scope_t scope, gfp_t gfp) { int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) flags |= SCTP_ADDR6_PEERSUPP; return sctp_bind_addr_copy(sock_net(asoc->base.sk), &asoc->base.bind_addr, &asoc->ep->base.bind_addr, scope, gfp, flags); } /* Build the association's bind address list from the cookie. 
*/ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, struct sctp_cookie *cookie, gfp_t gfp) { int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); int var_size3 = cookie->raw_addr_list_len; __u8 *raw = (__u8 *)cookie->peer_init + var_size2; return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, asoc->ep->base.bind_addr.port, gfp); } /* Lookup laddr in the bind address list of an association. */ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, const union sctp_addr *laddr) { int found = 0; if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && sctp_bind_addr_match(&asoc->base.bind_addr, laddr, sctp_sk(asoc->base.sk))) found = 1; return found; } /* Set an association id for a given association */ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) { bool preload = !!(gfp & __GFP_WAIT); int ret; /* If the id is already assigned, keep it. */ if (asoc->assoc_id) return 0; if (preload) idr_preload(gfp); spin_lock_bh(&sctp_assocs_id_lock); /* 0 is not a valid assoc_id, must be >= 1 */ ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT); spin_unlock_bh(&sctp_assocs_id_lock); if (preload) idr_preload_end(); if (ret < 0) return ret; asoc->assoc_id = (sctp_assoc_t)ret; return 0; } /* Free the ASCONF queue */ static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc) { struct sctp_chunk *asconf; struct sctp_chunk *tmp; list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) { list_del_init(&asconf->list); sctp_chunk_free(asconf); } } /* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Clean up the ASCONF_ACK queue */ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; /* We can remove all the entries from the queue up to * the "Peer-Sequence-Number". */ list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { if (ack->subh.addip_hdr->serial == htonl(asoc->peer.addip_serial)) break; list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Find the ASCONF_ACK whose serial number matches ASCONF */ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( const struct sctp_association *asoc, __be32 serial) { struct sctp_chunk *ack; /* Walk through the list of cached ASCONF-ACKs and find the * ack chunk whose serial number matches that of the request. */ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { if (sctp_chunk_pending(ack)) continue; if (ack->subh.addip_hdr->serial == serial) { sctp_chunk_hold(ack); return ack; } } return NULL; } void sctp_asconf_queue_teardown(struct sctp_association *asoc) { /* Free any cached ASCONF_ACK chunk. */ sctp_assoc_free_asconf_acks(asoc); /* Free the ASCONF queue. */ sctp_assoc_free_asconf_queue(asoc); /* Free any cached ASCONF chunk. */ if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); }
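/*
 * Illustrative sketch (hypothetical, not part of the kernel sources): a
 * small user-space rendering of the retransmission-path election rule
 * described in the comment above sctp_assoc_update_retran_path() and
 * implemented by sctp_trans_elect_best()/sctp_trans_elect_tie(): a higher
 * transport state priority wins, and ties fall back first to the error
 * count and then to the transport heard from most recently.  All names
 * and types below are invented for the example.
 */
#include <stdio.h>

enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE };

struct transport {
	const char *name;
	enum trans_state state;
	unsigned int error_count;
	long long last_time_heard;	/* larger value == heard more recently */
};

static const unsigned char trans_prio[] = {
	[T_ACTIVE]	= 3,	/* best case */
	[T_UNKNOWN]	= 2,
	[T_PF]		= 1,
	[T_INACTIVE]	= 0,	/* worst case */
};

/* Tie-break: prefer fewer errors, then the transport heard from last. */
static const struct transport *elect_tie(const struct transport *t1,
					 const struct transport *t2)
{
	if (t1->error_count > t2->error_count)
		return t2;
	if (t1->error_count == t2->error_count &&
	    t2->last_time_heard > t1->last_time_heard)
		return t2;
	return t1;
}

/* Score-based selection: state priority first, tie-break otherwise. */
static const struct transport *elect_best(const struct transport *curr,
					  const struct transport *best)
{
	if (best == NULL || curr == best)
		return curr;
	if (trans_prio[curr->state] > trans_prio[best->state])
		return curr;
	if (trans_prio[curr->state] == trans_prio[best->state])
		return elect_tie(curr, best);
	return best;
}

int main(void)
{
	const struct transport a = { "a", T_PF,     0, 100 };
	const struct transport b = { "b", T_ACTIVE, 5,  10 };
	const struct transport c = { "c", T_ACTIVE, 5,  20 };
	const struct transport *candidates[] = { &a, &b, &c };
	const struct transport *best = NULL;
	int i;

	for (i = 0; i < 3; i++)
		best = elect_best(candidates[i], best);
	/* Prints "best: c": both ACTIVE transports beat the PF one, and the
	 * tie between them is broken by last_time_heard. */
	printf("best: %s\n", best->name);
	return 0;
}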
./CrossVul/dataset_final_sorted/CWE-399/c/good_2171_1
crossvul-cpp_data_good_4966_3
/* * NET4: Implementation of BSD Unix domain sockets. * * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Linus Torvalds : Assorted bug cures. * Niibe Yutaka : async I/O support. * Carsten Paeth : PF_UNIX check, address fixes. * Alan Cox : Limit size of allocated blocks. * Alan Cox : Fixed the stupid socketpair bug. * Alan Cox : BSD compatibility fine tuning. * Alan Cox : Fixed a bug in connect when interrupted. * Alan Cox : Sorted out a proper draft version of * file descriptor passing hacked up from * Mike Shaver's work. * Marty Leisner : Fixes to fd passing * Nick Nevin : recvmsg bugfix. * Alan Cox : Started proper garbage collector * Heiko EiBfeldt : Missing verify_area check * Alan Cox : Started POSIXisms * Andreas Schwab : Replace inode by dentry for proper * reference counting * Kirk Petersen : Made this a module * Christoph Rohland : Elegant non-blocking accept/connect algorithm. * Lots of bug fixes. * Alexey Kuznetosv : Repaired (I hope) bugs introduces * by above two patches. * Andrea Arcangeli : If possible we block in connect(2) * if the max backlog of the listen socket * is been reached. This won't break * old apps and it will avoid huge amount * of socks hashed (this for unix_gc() * performances reasons). * Security fix that limits the max * number of socks to 2*max_files and * the number of skb queueable in the * dgram receiver. * Artur Skawina : Hash function optimizations * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8) * Malcolm Beattie : Set peercred for socketpair * Michal Ostrowski : Module initialization cleanup. * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT, * the core infrastructure is doing that * for all net proto families now (2.5.69+) * * * Known differences from reference BSD that was tested: * * [TO FIX] * ECONNREFUSED is not returned from one end of a connected() socket to the * other the moment one end closes. * fstat() doesn't return st_dev=0, and give the blksize as high water mark * and a fake inode identifier (nor the BSD first socket fstat twice bug). * [NOT TO FIX] * accept() returns a path name even if the connecting socket has closed * in the meantime (BSD loses the path and gives up). * accept() returns 0 length path for an unbound connector. BSD returns 16 * and a null first byte in the path (but not for gethost/peername - BSD bug ??) * socketpair(...SOCK_RAW..) doesn't panic the kernel. * BSD af_unix apparently has connect forgetting to block properly. * (need to check this with the POSIX spec in detail) * * Differences from 2.0.0-11-... (ANK) * Bug fixes and improvements. * - client shutdown killed server socket. * - removed all useless cli/sti pairs. * * Semantic changes/extensions. * - generic control message passing. * - SCM_CREDENTIALS control message. * - "Abstract" (not FS based) socket bindings. * Abstract names are sequences of bytes (not zero terminated) * started by 0, so that this name space does not intersect * with BSD names. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in.h> #include <linux/fs.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/af_unix.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/scm.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/mount.h> #include <net/checksum.h> #include <linux/security.h> #include <linux/freezer.h> struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); DEFINE_SPINLOCK(unix_table_lock); EXPORT_SYMBOL_GPL(unix_table_lock); static atomic_long_t unix_nr_socks; static struct hlist_head *unix_sockets_unbound(void *addr) { unsigned long hash = (unsigned long)addr; hash ^= hash >> 16; hash ^= hash >> 8; hash %= UNIX_HASH_SIZE; return &unix_socket_table[UNIX_HASH_SIZE + hash]; } #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE) #ifdef CONFIG_SECURITY_NETWORK static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { UNIXCB(skb).secid = scm->secid; } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { scm->secid = UNIXCB(skb).secid; } static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) { return (scm->secid == UNIXCB(skb).secid); } #else static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) { return true; } #endif /* CONFIG_SECURITY_NETWORK */ /* * SMP locking strategy: * hash table is protected with spinlock unix_table_lock * each socket state is protected by separate spin lock. */ static inline unsigned int unix_hash_fold(__wsum n) { unsigned int hash = (__force unsigned int)csum_fold(n); hash ^= hash>>8; return hash&(UNIX_HASH_SIZE-1); } #define unix_peer(sk) (unix_sk(sk)->peer) static inline int unix_our_peer(struct sock *sk, struct sock *osk) { return unix_peer(osk) == sk; } static inline int unix_may_send(struct sock *sk, struct sock *osk) { return unix_peer(osk) == NULL || unix_our_peer(sk, osk); } static inline int unix_recvq_full(struct sock const *sk) { return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } struct sock *unix_peer_get(struct sock *s) { struct sock *peer; unix_state_lock(s); peer = unix_peer(s); if (peer) sock_hold(peer); unix_state_unlock(s); return peer; } EXPORT_SYMBOL_GPL(unix_peer_get); static inline void unix_release_addr(struct unix_address *addr) { if (atomic_dec_and_test(&addr->refcnt)) kfree(addr); } /* * Check unix socket name: * - should be not zero length. * - if started by not zero, should be NULL terminated (FS object) * - if started by zero, it is abstract name. 
*/ static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) { if (len <= sizeof(short) || len > sizeof(*sunaddr)) return -EINVAL; if (!sunaddr || sunaddr->sun_family != AF_UNIX) return -EINVAL; if (sunaddr->sun_path[0]) { /* * This may look like an off by one error but it is a bit more * subtle. 108 is the longest valid AF_UNIX path for a binding. * sun_path[108] doesn't as such exist. However in kernel space * we are guaranteed that it is a valid memory location in our * kernel address buffer. */ ((char *)sunaddr)[len] = 0; len = strlen(sunaddr->sun_path)+1+sizeof(short); return len; } *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0)); return len; } static void __unix_remove_socket(struct sock *sk) { sk_del_node_init(sk); } static void __unix_insert_socket(struct hlist_head *list, struct sock *sk) { WARN_ON(!sk_unhashed(sk)); sk_add_node(sk, list); } static inline void unix_remove_socket(struct sock *sk) { spin_lock(&unix_table_lock); __unix_remove_socket(sk); spin_unlock(&unix_table_lock); } static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) { spin_lock(&unix_table_lock); __unix_insert_socket(list, sk); spin_unlock(&unix_table_lock); } static struct sock *__unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, int type, unsigned int hash) { struct sock *s; sk_for_each(s, &unix_socket_table[hash ^ type]) { struct unix_sock *u = unix_sk(s); if (!net_eq(sock_net(s), net)) continue; if (u->addr->len == len && !memcmp(u->addr->name, sunname, len)) goto found; } s = NULL; found: return s; } static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, int len, int type, unsigned int hash) { struct sock *s; spin_lock(&unix_table_lock); s = __unix_find_socket_byname(net, sunname, len, type, hash); if (s) sock_hold(s); spin_unlock(&unix_table_lock); return s; } static struct sock *unix_find_socket_byinode(struct inode *i) { struct sock *s; spin_lock(&unix_table_lock); sk_for_each(s, &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { struct dentry *dentry = unix_sk(s)->path.dentry; if (dentry && d_backing_inode(dentry) == i) { sock_hold(s); goto found; } } s = NULL; found: spin_unlock(&unix_table_lock); return s; } /* Support code for asymmetrically connected dgram sockets * * If a datagram socket is connected to a socket not itself connected * to the first socket (eg, /dev/log), clients may only enqueue more * messages if the present receive queue of the server socket is not * "too large". This means there's a second writeability condition * poll and sendmsg need to test. The dgram recv code will do a wake * up on the peer_wait wait queue of a socket upon reception of a * datagram which needs to be propagated to sleeping would-be writers * since these might not have sent anything so far. This can't be * accomplished via poll_wait because the lifetime of the server * socket might be less than that of its clients if these break their * association with it or if the server socket is closed while clients * are still connected to it and there's no way to inform "a polling * implementation" that it should let go of a certain wait queue * * In order to propagate a wake up, a wait_queue_t of the client * socket is enqueued on the peer_wait queue of the server socket * whose wake function does a wake_up on the ordinary client socket * wait queue. 
This connection is established whenever a write (or * poll for write) hit the flow control condition and broken when the * association to the server socket is dissolved or after a wake up * was relayed. */ static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, void *key) { struct unix_sock *u; wait_queue_head_t *u_sleep; u = container_of(q, struct unix_sock, peer_wake); __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, q); u->peer_wake.private = NULL; /* relaying can only happen while the wq still exists */ u_sleep = sk_sleep(&u->sk); if (u_sleep) wake_up_interruptible_poll(u_sleep, key); return 0; } static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) { struct unix_sock *u, *u_other; int rc; u = unix_sk(sk); u_other = unix_sk(other); rc = 0; spin_lock(&u_other->peer_wait.lock); if (!u->peer_wake.private) { u->peer_wake.private = other; __add_wait_queue(&u_other->peer_wait, &u->peer_wake); rc = 1; } spin_unlock(&u_other->peer_wait.lock); return rc; } static void unix_dgram_peer_wake_disconnect(struct sock *sk, struct sock *other) { struct unix_sock *u, *u_other; u = unix_sk(sk); u_other = unix_sk(other); spin_lock(&u_other->peer_wait.lock); if (u->peer_wake.private == other) { __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); u->peer_wake.private = NULL; } spin_unlock(&u_other->peer_wait.lock); } static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, struct sock *other) { unix_dgram_peer_wake_disconnect(sk, other); wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND); } /* preconditions: * - unix_peer(sk) == other * - association is stable */ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) { int connected; connected = unix_dgram_peer_wake_connect(sk, other); if (unix_recvq_full(other)) return 1; if (connected) unix_dgram_peer_wake_disconnect(sk, other); return 0; } static int unix_writable(const struct sock *sk) { return sk->sk_state != TCP_LISTEN && (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; } static void unix_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); if (unix_writable(sk)) { wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } /* When dgram socket disconnects (or changes its peer), we clear its receive * queue of packets arrived from previous peer. First, it allows to do * flow control based only on wmem_alloc; second, sk connected to peer * may receive messages only from that peer. */ static void unix_dgram_disconnected(struct sock *sk, struct sock *other) { if (!skb_queue_empty(&sk->sk_receive_queue)) { skb_queue_purge(&sk->sk_receive_queue); wake_up_interruptible_all(&unix_sk(sk)->peer_wait); /* If one link of bidirectional dgram pipe is disconnected, * we signal error. Messages are lost. Do not make this, * when peer was not connected to us. 
*/ if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) { other->sk_err = ECONNRESET; other->sk_error_report(other); } } } static void unix_sock_destructor(struct sock *sk) { struct unix_sock *u = unix_sk(sk); skb_queue_purge(&sk->sk_receive_queue); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(!sk_unhashed(sk)); WARN_ON(sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { pr_info("Attempt to release alive unix socket: %p\n", sk); return; } if (u->addr) unix_release_addr(u->addr); atomic_long_dec(&unix_nr_socks); local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); #ifdef UNIX_REFCNT_DEBUG pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, atomic_long_read(&unix_nr_socks)); #endif } static void unix_release_sock(struct sock *sk, int embrion) { struct unix_sock *u = unix_sk(sk); struct path path; struct sock *skpair; struct sk_buff *skb; int state; unix_remove_socket(sk); /* Clear state */ unix_state_lock(sk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; path = u->path; u->path.dentry = NULL; u->path.mnt = NULL; state = sk->sk_state; sk->sk_state = TCP_CLOSE; unix_state_unlock(sk); wake_up_interruptible_all(&u->peer_wait); skpair = unix_peer(sk); if (skpair != NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { unix_state_lock(skpair); /* No more writes */ skpair->sk_shutdown = SHUTDOWN_MASK; if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) skpair->sk_err = ECONNRESET; unix_state_unlock(skpair); skpair->sk_state_change(skpair); sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); } unix_dgram_peer_wake_disconnect(sk, skpair); sock_put(skpair); /* It may now die */ unix_peer(sk) = NULL; } /* Try to flush out this socket. Throw out buffers at least */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (state == TCP_LISTEN) unix_release_sock(skb->sk, 1); /* passed fds are erased in the kfree_skb hook */ UNIXCB(skb).consumed = skb->len; kfree_skb(skb); } if (path.dentry) path_put(&path); sock_put(sk); /* ---- Socket is dead now and most probably destroyed ---- */ /* * Fixme: BSD difference: In BSD all sockets connected to us get * ECONNRESET and we die on the spot. In Linux we behave * like files and pipes do and wait for the last * dereference. * * Can't we simply set sock->err? * * What the above comment does talk about? 
--ANK(980817) */ if (unix_tot_inflight) unix_gc(); /* Garbage collect fds */ } static void init_peercred(struct sock *sk) { put_pid(sk->sk_peer_pid); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); } static void copy_peercred(struct sock *sk, struct sock *peersk) { put_pid(sk->sk_peer_pid); if (sk->sk_peer_cred) put_cred(sk->sk_peer_cred); sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); } static int unix_listen(struct socket *sock, int backlog) { int err; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); struct pid *old_pid = NULL; err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto out; /* Only stream/seqpacket sockets accept */ err = -EINVAL; if (!u->addr) goto out; /* No listens on an unbound socket */ unix_state_lock(sk); if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) goto out_unlock; if (backlog > sk->sk_max_ack_backlog) wake_up_interruptible_all(&u->peer_wait); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; /* set credentials so connect can copy them */ init_peercred(sk); err = 0; out_unlock: unix_state_unlock(sk); put_pid(old_pid); out: return err; } static int unix_release(struct socket *); static int unix_bind(struct socket *, struct sockaddr *, int); static int unix_stream_connect(struct socket *, struct sockaddr *, int addr_len, int flags); static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, int); static int unix_getname(struct socket *, struct sockaddr *, int *, int); static unsigned int unix_poll(struct file *, struct socket *, poll_table *); static unsigned int unix_dgram_poll(struct file *, struct socket *, poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset, size_t size, int flags); static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos, struct pipe_inode_info *, size_t size, unsigned int flags); static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); static int unix_dgram_connect(struct socket *, struct sockaddr *, int, int); static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, int); static int unix_set_peek_off(struct sock *sk, int val) { struct unix_sock *u = unix_sk(sk); if (mutex_lock_interruptible(&u->readlock)) return -EINTR; sk->sk_peek_off = val; mutex_unlock(&u->readlock); return 0; } static const struct proto_ops unix_stream_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_stream_connect, .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, .poll = unix_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = unix_stream_sendmsg, .recvmsg = unix_stream_recvmsg, .mmap = sock_no_mmap, .sendpage = unix_stream_sendpage, .splice_read = unix_stream_splice_read, .set_peek_off = unix_set_peek_off, }; 
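/* Illustrative sketch (not part of this file): the .set_peek_off hook wired
 * up in the ops tables above and below backs the SO_PEEK_OFF socket option,
 * which lets userspace peek queued data from a moving offset instead of
 * always re-reading from the head of the queue. Assuming a connected AF_UNIX
 * stream socket 'fd' with enough data queued and the usual <sys/socket.h>
 * includes:
 *
 *	int off = 0;
 *	char buf[64];
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks bytes 0..63
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks bytes 64..127
 *
 * Each MSG_PEEK advances the stored offset by the number of bytes peeked
 * (sk_peek_offset_fwd() in the receive paths below), while a consuming read
 * rewinds it via sk_peek_offset_bwd().
 */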
static const struct proto_ops unix_dgram_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_dgram_connect, .socketpair = unix_socketpair, .accept = sock_no_accept, .getname = unix_getname, .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = sock_no_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = unix_dgram_sendmsg, .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .set_peek_off = unix_set_peek_off, }; static const struct proto_ops unix_seqpacket_ops = { .family = PF_UNIX, .owner = THIS_MODULE, .release = unix_release, .bind = unix_bind, .connect = unix_stream_connect, .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = unix_seqpacket_sendmsg, .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .set_peek_off = unix_set_peek_off, }; static struct proto unix_proto = { .name = "UNIX", .owner = THIS_MODULE, .obj_size = sizeof(struct unix_sock), }; /* * AF_UNIX sockets do not interact with hardware, hence they * dont trigger interrupts - so it's safe for them to have * bh-unsafe locking for their sk_receive_queue.lock. Split off * this special lock-class by reinitializing the spinlock key: */ static struct lock_class_key af_unix_sk_receive_queue_lock_key; static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) { struct sock *sk = NULL; struct unix_sock *u; atomic_long_inc(&unix_nr_socks); if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) goto out; sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern); if (!sk) goto out; sock_init_data(sock, sk); lockdep_set_class(&sk->sk_receive_queue.lock, &af_unix_sk_receive_queue_lock_key); sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); atomic_long_set(&u->inflight, 0); INIT_LIST_HEAD(&u->link); mutex_init(&u->readlock); /* single task reading lock */ init_waitqueue_head(&u->peer_wait); init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); unix_insert_socket(unix_sockets_unbound(sk), sk); out: if (sk == NULL) atomic_long_dec(&unix_nr_socks); else { local_bh_disable(); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); local_bh_enable(); } return sk; } static int unix_create(struct net *net, struct socket *sock, int protocol, int kern) { if (protocol && protocol != PF_UNIX) return -EPROTONOSUPPORT; sock->state = SS_UNCONNECTED; switch (sock->type) { case SOCK_STREAM: sock->ops = &unix_stream_ops; break; /* * Believe it or not BSD has AF_UNIX, SOCK_RAW though * nothing uses it. */ case SOCK_RAW: sock->type = SOCK_DGRAM; case SOCK_DGRAM: sock->ops = &unix_dgram_ops; break; case SOCK_SEQPACKET: sock->ops = &unix_seqpacket_ops; break; default: return -ESOCKTNOSUPPORT; } return unix_create1(net, sock, kern) ? 
0 : -ENOMEM; } static int unix_release(struct socket *sock) { struct sock *sk = sock->sk; if (!sk) return 0; unix_release_sock(sk, 0); sock->sk = NULL; return 0; } static int unix_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); static u32 ordernum = 1; struct unix_address *addr; int err; unsigned int retries = 0; err = mutex_lock_interruptible(&u->readlock); if (err) return err; err = 0; if (u->addr) goto out; err = -ENOMEM; addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); if (!addr) goto out; addr->name->sun_family = AF_UNIX; atomic_set(&addr->refcnt, 1); retry: addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short); addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0)); spin_lock(&unix_table_lock); ordernum = (ordernum+1)&0xFFFFF; if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, addr->hash)) { spin_unlock(&unix_table_lock); /* * __unix_find_socket_byname() may take long time if many names * are already in use. */ cond_resched(); /* Give up if all names seems to be in use. */ if (retries++ == 0xFFFFF) { err = -ENOSPC; kfree(addr); goto out; } goto retry; } addr->hash ^= sk->sk_type; __unix_remove_socket(sk); u->addr = addr; __unix_insert_socket(&unix_socket_table[addr->hash], sk); spin_unlock(&unix_table_lock); err = 0; out: mutex_unlock(&u->readlock); return err; } static struct sock *unix_find_other(struct net *net, struct sockaddr_un *sunname, int len, int type, unsigned int hash, int *error) { struct sock *u; struct path path; int err = 0; if (sunname->sun_path[0]) { struct inode *inode; err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); if (err) goto fail; inode = d_backing_inode(path.dentry); err = inode_permission(inode, MAY_WRITE); if (err) goto put_fail; err = -ECONNREFUSED; if (!S_ISSOCK(inode->i_mode)) goto put_fail; u = unix_find_socket_byinode(inode); if (!u) goto put_fail; if (u->sk_type == type) touch_atime(&path); path_put(&path); err = -EPROTOTYPE; if (u->sk_type != type) { sock_put(u); goto fail; } } else { err = -ECONNREFUSED; u = unix_find_socket_byname(net, sunname, len, type, hash); if (u) { struct dentry *dentry; dentry = unix_sk(u)->path.dentry; if (dentry) touch_atime(&unix_sk(u)->path); } else goto fail; } return u; put_fail: path_put(&path); fail: *error = err; return NULL; } static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode, struct path *res) { int err; err = security_path_mknod(path, dentry, mode, 0); if (!err) { err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0); if (!err) { res->mnt = mntget(path->mnt); res->dentry = dget(dentry); } } return err; } static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; int err, name_err; unsigned int hash; struct unix_address *addr; struct hlist_head *list; struct path path; struct dentry *dentry; err = -EINVAL; if (sunaddr->sun_family != AF_UNIX) goto out; if (addr_len == sizeof(short)) { err = unix_autobind(sock); goto out; } err = unix_mkname(sunaddr, addr_len, &hash); if (err < 0) goto out; addr_len = err; name_err = 0; dentry = NULL; if (sun_path[0]) { /* Get the parent directory, calculate the hash for last * component. 
*/ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); if (IS_ERR(dentry)) { /* delay report until after 'already bound' check */ name_err = PTR_ERR(dentry); dentry = NULL; } } err = mutex_lock_interruptible(&u->readlock); if (err) goto out_path; err = -EINVAL; if (u->addr) goto out_up; if (name_err) { err = name_err == -EEXIST ? -EADDRINUSE : name_err; goto out_up; } err = -ENOMEM; addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL); if (!addr) goto out_up; memcpy(addr->name, sunaddr, addr_len); addr->len = addr_len; addr->hash = hash ^ sk->sk_type; atomic_set(&addr->refcnt, 1); if (dentry) { struct path u_path; umode_t mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current_umask()); err = unix_mknod(dentry, &path, mode, &u_path); if (err) { if (err == -EEXIST) err = -EADDRINUSE; unix_release_addr(addr); goto out_up; } addr->hash = UNIX_HASH_SIZE; hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); spin_lock(&unix_table_lock); u->path = u_path; list = &unix_socket_table[hash]; } else { spin_lock(&unix_table_lock); err = -EADDRINUSE; if (__unix_find_socket_byname(net, sunaddr, addr_len, sk->sk_type, hash)) { unix_release_addr(addr); goto out_unlock; } list = &unix_socket_table[addr->hash]; } err = 0; __unix_remove_socket(sk); u->addr = addr; __unix_insert_socket(list, sk); out_unlock: spin_unlock(&unix_table_lock); out_up: mutex_unlock(&u->readlock); out_path: if (dentry) done_path_create(&path, dentry); out: return err; } static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) { if (unlikely(sk1 == sk2) || !sk2) { unix_state_lock(sk1); return; } if (sk1 < sk2) { unix_state_lock(sk1); unix_state_lock_nested(sk2); } else { unix_state_lock(sk2); unix_state_lock_nested(sk1); } } static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) { if (unlikely(sk1 == sk2) || !sk2) { unix_state_unlock(sk1); return; } unix_state_unlock(sk1); unix_state_unlock(sk2); } static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *other; unsigned int hash; int err; if (addr->sa_family != AF_UNSPEC) { err = unix_mkname(sunaddr, alen, &hash); if (err < 0) goto out; alen = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0) goto out; restart: other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err); if (!other) goto out; unix_state_double_lock(sk, other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { unix_state_double_unlock(sk, other); sock_put(other); goto restart; } err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } else { /* * 1003.1g breaking connected state with AF_UNSPEC */ other = NULL; unix_state_double_lock(sk, other); } /* * If it was connected, reconnect. 
*/ if (unix_peer(sk)) { struct sock *old_peer = unix_peer(sk); unix_peer(sk) = other; unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); unix_state_double_unlock(sk, other); if (other != old_peer) unix_dgram_disconnected(sk, old_peer); sock_put(old_peer); } else { unix_peer(sk) = other; unix_state_double_unlock(sk, other); } return 0; out_unlock: unix_state_double_unlock(sk, other); sock_put(other); out: return err; } static long unix_wait_for_peer(struct sock *other, long timeo) { struct unix_sock *u = unix_sk(other); int sched; DEFINE_WAIT(wait); prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE); sched = !sock_flag(other, SOCK_DEAD) && !(other->sk_shutdown & RCV_SHUTDOWN) && unix_recvq_full(other); unix_state_unlock(other); if (sched) timeo = schedule_timeout(timeo); finish_wait(&u->peer_wait, &wait); return timeo; } static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; unsigned int hash; int st; int err; long timeo; err = unix_mkname(sunaddr, addr_len, &hash); if (err < 0) goto out; addr_len = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); /* First of all allocate resources. If we will make it after state is locked, we will have to recheck all again in any case. */ err = -ENOMEM; /* create new sock for complete connection */ newsk = unix_create1(sock_net(sk), NULL, 0); if (newsk == NULL) goto out; /* Allocate skb for sending to listening sock */ skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); if (skb == NULL) goto out; restart: /* Find listening sock. */ other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err); if (!other) goto out; /* Latch state of peer */ unix_state_lock(other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { unix_state_unlock(other); sock_put(other); goto restart; } err = -ECONNREFUSED; if (other->sk_state != TCP_LISTEN) goto out_unlock; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (unix_recvq_full(other)) { err = -EAGAIN; if (!timeo) goto out_unlock; timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out; sock_put(other); goto restart; } /* Latch our state. It is tricky place. We need to grab our state lock and cannot drop lock on peer. It is dangerous because deadlock is possible. Connect to self case and simultaneous attempt to connect are eliminated by checking socket state. other is TCP_LISTEN, if sk is TCP_LISTEN we check this before attempt to grab lock. Well, and we have to recheck the state after socket locked. */ st = sk->sk_state; switch (st) { case TCP_CLOSE: /* This is ok... continue with connect */ break; case TCP_ESTABLISHED: /* Socket is already connected */ err = -EISCONN; goto out_unlock; default: err = -EINVAL; goto out_unlock; } unix_state_lock_nested(sk); if (sk->sk_state != st) { unix_state_unlock(sk); unix_state_unlock(other); sock_put(other); goto restart; } err = security_unix_stream_connect(sk, other, newsk); if (err) { unix_state_unlock(sk); goto out_unlock; } /* The way is open! Fastly set all the necessary fields... 
*/ sock_hold(sk); unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; init_peercred(newsk); newu = unix_sk(newsk); RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other); /* copy address information from listening to new sock*/ if (otheru->addr) { atomic_inc(&otheru->addr->refcnt); newu->addr = otheru->addr; } if (otheru->path.dentry) { path_get(&otheru->path); newu->path = otheru->path; } /* Set credentials */ copy_peercred(sk, other); sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sock_hold(newsk); smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk; unix_state_unlock(sk); /* take ten and and send info to listening sock */ spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, skb); spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); other->sk_data_ready(other); sock_put(other); return 0; out_unlock: if (other) unix_state_unlock(other); out: kfree_skb(skb); if (newsk) unix_release_sock(newsk, 0); if (other) sock_put(other); return err; } static int unix_socketpair(struct socket *socka, struct socket *sockb) { struct sock *ska = socka->sk, *skb = sockb->sk; /* Join our sockets back to back */ sock_hold(ska); sock_hold(skb); unix_peer(ska) = skb; unix_peer(skb) = ska; init_peercred(ska); init_peercred(skb); if (ska->sk_type != SOCK_DGRAM) { ska->sk_state = TCP_ESTABLISHED; skb->sk_state = TCP_ESTABLISHED; socka->state = SS_CONNECTED; sockb->state = SS_CONNECTED; } return 0; } static void unix_sock_inherit_flags(const struct socket *old, struct socket *new) { if (test_bit(SOCK_PASSCRED, &old->flags)) set_bit(SOCK_PASSCRED, &new->flags); if (test_bit(SOCK_PASSSEC, &old->flags)) set_bit(SOCK_PASSSEC, &new->flags); } static int unix_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sock *tsk; struct sk_buff *skb; int err; err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto out; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out; /* If socket state is TCP_LISTEN it cannot change (for now...), * so that no locks are necessary. */ skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err); if (!skb) { /* This means receive shutdown. 
*/ if (err == 0) err = -EINVAL; goto out; } tsk = skb->sk; skb_free_datagram(sk, skb); wake_up_interruptible(&unix_sk(sk)->peer_wait); /* attach accepted sock to socket */ unix_state_lock(tsk); newsock->state = SS_CONNECTED; unix_sock_inherit_flags(sock, newsock); sock_graft(tsk, newsock); unix_state_unlock(tsk); return 0; out: return err; } static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sock *sk = sock->sk; struct unix_sock *u; DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); int err = 0; if (peer) { sk = unix_peer_get(sk); err = -ENOTCONN; if (!sk) goto out; err = 0; } else { sock_hold(sk); } u = unix_sk(sk); unix_state_lock(sk); if (!u->addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; *uaddr_len = sizeof(short); } else { struct unix_address *addr = u->addr; *uaddr_len = addr->len; memcpy(sunaddr, addr->name, *uaddr_len); } unix_state_unlock(sk); sock_put(sk); out: return err; } static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; scm->fp = UNIXCB(skb).fp; UNIXCB(skb).fp = NULL; for (i = scm->fp->count-1; i >= 0; i--) unix_notinflight(scm->fp->user, scm->fp->fp[i]); } static void unix_destruct_scm(struct sk_buff *skb) { struct scm_cookie scm; memset(&scm, 0, sizeof(scm)); scm.pid = UNIXCB(skb).pid; if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); /* Alas, it calls VFS */ /* So fscking what? fput() had been SMP-safe since the last Summer */ scm_destroy(&scm); sock_wfree(skb); } /* * The "user->unix_inflight" variable is protected by the garbage * collection lock, and we just read it locklessly here. If you go * over the limit, there might be a tiny race in actually noticing * it across threads. Tough. */ static inline bool too_many_unix_fds(struct task_struct *p) { struct user_struct *user = current_user(); if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); return false; } #define MAX_RECURSION_LEVEL 4 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; unsigned char max_level = 0; int unix_sock_count = 0; if (too_many_unix_fds(current)) return -ETOOMANYREFS; for (i = scm->fp->count - 1; i >= 0; i--) { struct sock *sk = unix_get_socket(scm->fp->fp[i]); if (sk) { unix_sock_count++; max_level = max(max_level, unix_sk(sk)->recursion_level); } } if (unlikely(max_level > MAX_RECURSION_LEVEL)) return -ETOOMANYREFS; /* * Need to duplicate file references for the sake of garbage * collection. Otherwise a socket in the fps might become a * candidate for GC while the skb is not yet queued. */ UNIXCB(skb).fp = scm_fp_dup(scm->fp); if (!UNIXCB(skb).fp) return -ENOMEM; for (i = scm->fp->count - 1; i >= 0; i--) unix_inflight(scm->fp->user, scm->fp->fp[i]); return max_level; } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) { int err = 0; UNIXCB(skb).pid = get_pid(scm->pid); UNIXCB(skb).uid = scm->creds.uid; UNIXCB(skb).gid = scm->creds.gid; UNIXCB(skb).fp = NULL; unix_get_secdata(scm, skb); if (scm->fp && send_fds) err = unix_attach_fds(scm, skb); skb->destructor = unix_destruct_scm; return err; } static bool unix_passcred_enabled(const struct socket *sock, const struct sock *other) { return test_bit(SOCK_PASSCRED, &sock->flags) || !other->sk_socket || test_bit(SOCK_PASSCRED, &other->sk_socket->flags); } /* * Some apps rely on write() giving SCM_CREDENTIALS * We include credentials if source or destination socket * asserted SOCK_PASSCRED. 
*/ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, const struct sock *other) { if (UNIXCB(skb).pid) return; if (unix_passcred_enabled(sock, other)) { UNIXCB(skb).pid = get_pid(task_tgid(current)); current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); } } static int maybe_init_creds(struct scm_cookie *scm, struct socket *socket, const struct sock *other) { int err; struct msghdr msg = { .msg_controllen = 0 }; err = scm_send(socket, &msg, scm, false); if (err) return err; if (unix_passcred_enabled(socket, other)) { scm->pid = get_pid(task_tgid(current)); current_uid_gid(&scm->creds.uid, &scm->creds.gid); } return err; } static bool unix_skb_scm_eq(struct sk_buff *skb, struct scm_cookie *scm) { const struct unix_skb_parms *u = &UNIXCB(skb); return u->pid == scm->pid && uid_eq(u->uid, scm->creds.uid) && gid_eq(u->gid, scm->creds.gid) && unix_secdata_eq(scm, skb); } /* * Send AF_UNIX data. */ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie scm; int max_level; int data_len = 0; int sk_locked; wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; if (len > SKB_MAX_ALLOC) { data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); data_len = PAGE_ALIGN(data_len); BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); } skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, PAGE_ALLOC_COSTLY_ORDER); if (skb == NULL) goto out; err = unix_scm_to_skb(&scm, skb, true); if (err < 0) goto out_free; max_level = err + 1; skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other == NULL) goto out_free; } if (sk_filter(other, skb) < 0) { /* Toss the packet but do not return any error to the sender */ err = len; goto out_free; } sk_locked = 0; unix_state_lock(other); restart_locked: err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (unlikely(sock_flag(other, SOCK_DEAD))) { /* * Check with 1003.1g - what should * datagram error */ unix_state_unlock(other); sock_put(other); if (!sk_locked) unix_state_lock(sk); err = 0; if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_dgram_peer_wake_disconnect_wakeup(sk, other); unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = 
security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } if (!sk_locked) { unix_state_unlock(other); unix_state_double_lock(sk, other); } if (unix_peer(sk) != other || unix_dgram_peer_wake_me(sk, other)) { err = -EAGAIN; sk_locked = 1; goto out_unlock; } if (!sk_locked) { sk_locked = 1; goto restart_locked; } } if (unlikely(sk_locked)) unix_state_unlock(sk); if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); if (max_level > unix_sk(other)->recursion_level) unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other); sock_put(other); scm_destroy(&scm); return len; out_unlock: if (sk_locked) unix_state_unlock(sk); unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(&scm); return err; } /* We use paged skbs for stream sockets, and limit occupancy to 32768 * bytes, and a minimun of a full page. */ #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sock *other = NULL; int err, size; struct sk_buff *skb; int sent = 0; struct scm_cookie scm; bool fds_sent = false; int max_level; int data_len; wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out_err; if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out_err; } else { err = -ENOTCONN; other = unix_peer(sk); if (!other) goto out_err; } if (sk->sk_shutdown & SEND_SHUTDOWN) goto pipe_err; while (sent < len) { size = len - sent; /* Keep two messages in the pipe so it schedules better */ size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); /* allow fallback to order-0 allocations */ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); skb = sock_alloc_send_pskb(sk, size - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, get_order(UNIX_SKB_FRAGS_SZ)); if (!skb) goto out_err; /* Only send the fds in the first buffer */ err = unix_scm_to_skb(&scm, skb, !fds_sent); if (err < 0) { kfree_skb(skb); goto out_err; } max_level = err + 1; fds_sent = true; skb_put(skb, size - data_len); skb->data_len = data_len; skb->len = size; err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); if (err) { kfree_skb(skb); goto out_err; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto pipe_err_free; maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); if (max_level > unix_sk(other)->recursion_level) unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other); sent += size; } scm_destroy(&scm); return sent; pipe_err_free: unix_state_unlock(other); kfree_skb(skb); pipe_err: if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); err = -EPIPE; out_err: scm_destroy(&scm); return sent ? 
: err; } static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, int offset, size_t size, int flags) { int err; bool send_sigpipe = false; bool init_scm = true; struct scm_cookie scm; struct sock *other, *sk = socket->sk; struct sk_buff *skb, *newskb = NULL, *tail = NULL; if (flags & MSG_OOB) return -EOPNOTSUPP; other = unix_peer(sk); if (!other || sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; if (false) { alloc_skb: unix_state_unlock(other); mutex_unlock(&unix_sk(other)->readlock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, &err, 0); if (!newskb) goto err; } /* we must acquire readlock as we modify already present * skbs in the sk_receive_queue and mess with skb->len */ err = mutex_lock_interruptible(&unix_sk(other)->readlock); if (err) { err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; goto err; } if (sk->sk_shutdown & SEND_SHUTDOWN) { err = -EPIPE; send_sigpipe = true; goto err_unlock; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || other->sk_shutdown & RCV_SHUTDOWN) { err = -EPIPE; send_sigpipe = true; goto err_state_unlock; } if (init_scm) { err = maybe_init_creds(&scm, socket, other); if (err) goto err_state_unlock; init_scm = false; } skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { if (newskb) { skb = newskb; } else { tail = skb; goto alloc_skb; } } else if (newskb) { /* this is fast path, we don't necessarily need to * call to kfree_skb even though with newskb == NULL * this - does no harm */ consume_skb(newskb); newskb = NULL; } if (skb_append_pagefrags(skb, page, offset, size)) { tail = skb; goto alloc_skb; } skb->len += size; skb->data_len += size; skb->truesize += size; atomic_add(size, &sk->sk_wmem_alloc); if (newskb) { err = unix_scm_to_skb(&scm, skb, false); if (err) goto err_state_unlock; spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, newskb); spin_unlock(&other->sk_receive_queue.lock); } unix_state_unlock(other); mutex_unlock(&unix_sk(other)->readlock); other->sk_data_ready(other); scm_destroy(&scm); return size; err_state_unlock: unix_state_unlock(other); err_unlock: mutex_unlock(&unix_sk(other)->readlock); err: kfree_skb(newskb); if (send_sigpipe && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); if (!init_scm) scm_destroy(&scm); return err; } static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { int err; struct sock *sk = sock->sk; err = sock_error(sk); if (err) return err; if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; if (msg->msg_namelen) msg->msg_namelen = 0; return unix_dgram_sendmsg(sock, msg, len); } static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; return unix_dgram_recvmsg(sock, msg, size, flags); } static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_sock *u = unix_sk(sk); if (u->addr) { msg->msg_namelen = u->addr->len; memcpy(msg->msg_name, u->addr->name, u->addr->len); } } static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct scm_cookie scm; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); struct sk_buff *skb, *last; long timeo; int err; int peeked, skip; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { mutex_lock(&u->readlock); skip = 
sk_peek_offset(sk, flags); skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err, &last); if (skb) break; mutex_unlock(&u->readlock); if (err != -EAGAIN) break; } while (timeo && !__skb_wait_for_more_packets(sk, &err, &timeo, last)); if (!skb) { /* implies readlock unlocked */ unix_state_lock(sk); /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN)) err = 0; unix_state_unlock(sk); goto out; } if (wq_has_sleeper(&u->peer_wait)) wake_up_interruptible_sync_poll(&u->peer_wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (msg->msg_name) unix_copy_addr(msg, skb->sk); if (size > skb->len - skip) size = skb->len - skip; else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_msg(skb, skip, msg, size); if (err) goto out_free; if (sock_flag(sk, SOCK_RCVTSTAMP)) __sock_recv_timestamp(msg, sk, skb); memset(&scm, 0, sizeof(scm)); scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); if (!(flags & MSG_PEEK)) { if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); sk_peek_offset_bwd(sk, skb->len); } else { /* It is questionable: on PEEK we could: - do not return fds - good, but too simple 8) - return fds, and do not return them on read (old strategy, apparently wrong) - clone fds (I chose it for now, it is the most universal solution) POSIX 1003.1g does not actually define this clearly at all. POSIX 1003.1g doesn't define a lot of things clearly however! */ sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) scm.fp = scm_fp_dup(UNIXCB(skb).fp); } err = (flags & MSG_TRUNC) ? skb->len - skip : size; scm_recv(sock, msg, &scm, flags); out_free: skb_free_datagram(sk, skb); mutex_unlock(&u->readlock); out: return err; } /* * Sleep until more data has arrived. But check for races.. 
*/ static long unix_stream_data_wait(struct sock *sk, long timeo, struct sk_buff *last, unsigned int last_len) { struct sk_buff *tail; DEFINE_WAIT(wait); unix_state_lock(sk); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); tail = skb_peek_tail(&sk->sk_receive_queue); if (tail != last || (tail && tail->len != last_len) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) || signal_pending(current) || !timeo) break; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); timeo = freezable_schedule_timeout(timeo); unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) break; sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } finish_wait(sk_sleep(sk), &wait); unix_state_unlock(sk); return timeo; } static unsigned int unix_skb_len(const struct sk_buff *skb) { return skb->len - UNIXCB(skb).consumed; } struct unix_stream_read_state { int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); struct socket *socket; struct msghdr *msg; struct pipe_inode_info *pipe; size_t size; int flags; unsigned int splice_flags; }; static int unix_stream_read_generic(struct unix_stream_read_state *state) { struct scm_cookie scm; struct socket *sock = state->socket; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); int copied = 0; int flags = state->flags; int noblock = flags & MSG_DONTWAIT; bool check_creds = false; int target; int err = 0; long timeo; int skip; size_t size = state->size; unsigned int last_len; err = -EINVAL; if (sk->sk_state != TCP_ESTABLISHED) goto out; err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, noblock); memset(&scm, 0, sizeof(scm)); /* Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg */ mutex_lock(&u->readlock); if (flags & MSG_PEEK) skip = sk_peek_offset(sk, flags); else skip = 0; do { int chunk; bool drop_skb; struct sk_buff *skb, *last; unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) { err = -ECONNRESET; goto unlock; } last = skb = skb_peek(&sk->sk_receive_queue); last_len = last ? last->len : 0; again: if (skb == NULL) { unix_sk(sk)->recursion_level = 0; if (copied >= target) goto unlock; /* * POSIX 1003.1g mandates this order. 
*/ err = sock_error(sk); if (err) goto unlock; if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; unix_state_unlock(sk); err = -EAGAIN; if (!timeo) break; mutex_unlock(&u->readlock); timeo = unix_stream_data_wait(sk, timeo, last, last_len); if (signal_pending(current)) { err = sock_intr_errno(timeo); scm_destroy(&scm); goto out; } mutex_lock(&u->readlock); continue; unlock: unix_state_unlock(sk); break; } while (skip >= unix_skb_len(skb)) { skip -= unix_skb_len(skb); last = skb; last_len = skb->len; skb = skb_peek_next(skb, &sk->sk_receive_queue); if (!skb) goto again; } unix_state_unlock(sk); if (check_creds) { /* Never glue messages from different writers */ if (!unix_skb_scm_eq(skb, &scm)) break; } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { /* Copy credentials */ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); check_creds = true; } /* Copy address just once */ if (state->msg && state->msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, state->msg->msg_name); unix_copy_addr(state->msg, skb->sk); sunaddr = NULL; } chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); skb_get(skb); chunk = state->recv_actor(skb, skip, chunk, state); drop_skb = !unix_skb_len(skb); /* skb is only safe to use if !drop_skb */ consume_skb(skb); if (chunk < 0) { if (copied == 0) copied = -EFAULT; break; } copied += chunk; size -= chunk; if (drop_skb) { /* the skb was touched by a concurrent reader; * we should not expect anything from this skb * anymore and assume it invalid - we can be * sure it was dropped from the socket queue * * let's report a short read */ err = 0; break; } /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { UNIXCB(skb).consumed += chunk; sk_peek_offset_bwd(sk, chunk); if (UNIXCB(skb).fp) unix_detach_fds(&scm, skb); if (unix_skb_len(skb)) break; skb_unlink(skb, &sk->sk_receive_queue); consume_skb(skb); if (scm.fp) break; } else { /* It is questionable, see note in unix_dgram_recvmsg. */ if (UNIXCB(skb).fp) scm.fp = scm_fp_dup(UNIXCB(skb).fp); sk_peek_offset_fwd(sk, chunk); if (UNIXCB(skb).fp) break; skip = 0; last = skb; last_len = skb->len; unix_state_lock(sk); skb = skb_peek_next(skb, &sk->sk_receive_queue); if (skb) goto again; unix_state_unlock(sk); break; } } while (size); mutex_unlock(&u->readlock); if (state->msg) scm_recv(sock, state->msg, &scm, flags); else scm_destroy(&scm); out: return copied ? 
: err; } static int unix_stream_read_actor(struct sk_buff *skb, int skip, int chunk, struct unix_stream_read_state *state) { int ret; ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, state->msg, chunk); return ret ?: chunk; } static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct unix_stream_read_state state = { .recv_actor = unix_stream_read_actor, .socket = sock, .msg = msg, .size = size, .flags = flags }; return unix_stream_read_generic(&state); } static ssize_t skb_unix_socket_splice(struct sock *sk, struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { int ret; struct unix_sock *u = unix_sk(sk); mutex_unlock(&u->readlock); ret = splice_to_pipe(pipe, spd); mutex_lock(&u->readlock); return ret; } static int unix_stream_splice_actor(struct sk_buff *skb, int skip, int chunk, struct unix_stream_read_state *state) { return skb_splice_bits(skb, state->socket->sk, UNIXCB(skb).consumed + skip, state->pipe, chunk, state->splice_flags, skb_unix_socket_splice); } static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t size, unsigned int flags) { struct unix_stream_read_state state = { .recv_actor = unix_stream_splice_actor, .socket = sock, .pipe = pipe, .size = size, .splice_flags = flags, }; if (unlikely(*ppos)) return -ESPIPE; if (sock->file->f_flags & O_NONBLOCK || flags & SPLICE_F_NONBLOCK) state.flags = MSG_DONTWAIT; return unix_stream_read_generic(&state); } static int unix_shutdown(struct socket *sock, int mode) { struct sock *sk = sock->sk; struct sock *other; if (mode < SHUT_RD || mode > SHUT_RDWR) return -EINVAL; /* This maps: * SHUT_RD (0) -> RCV_SHUTDOWN (1) * SHUT_WR (1) -> SEND_SHUTDOWN (2) * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) */ ++mode; unix_state_lock(sk); sk->sk_shutdown |= mode; other = unix_peer(sk); if (other) sock_hold(other); unix_state_unlock(sk); sk->sk_state_change(sk); if (other && (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { int peer_mode = 0; if (mode&RCV_SHUTDOWN) peer_mode |= SEND_SHUTDOWN; if (mode&SEND_SHUTDOWN) peer_mode |= RCV_SHUTDOWN; unix_state_lock(other); other->sk_shutdown |= peer_mode; unix_state_unlock(other); other->sk_state_change(other); if (peer_mode == SHUTDOWN_MASK) sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); else if (peer_mode & RCV_SHUTDOWN) sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); } if (other) sock_put(other); return 0; } long unix_inq_len(struct sock *sk) { struct sk_buff *skb; long amount = 0; if (sk->sk_state == TCP_LISTEN) return -EINVAL; spin_lock(&sk->sk_receive_queue.lock); if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { skb_queue_walk(&sk->sk_receive_queue, skb) amount += unix_skb_len(skb); } else { skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; } spin_unlock(&sk->sk_receive_queue.lock); return amount; } EXPORT_SYMBOL_GPL(unix_inq_len); long unix_outq_len(struct sock *sk) { return sk_wmem_alloc_get(sk); } EXPORT_SYMBOL_GPL(unix_outq_len); static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; long amount = 0; int err; switch (cmd) { case SIOCOUTQ: amount = unix_outq_len(sk); err = put_user(amount, (int __user *)arg); break; case SIOCINQ: amount = unix_inq_len(sk); if (amount < 0) err = amount; else err = put_user(amount, (int __user *)arg); break; default: err = -ENOIOCTLCMD; break; } return err; } static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait) { 
struct sock *sk = sock->sk; unsigned int mask; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* exceptional events? */ if (sk->sk_err) mask |= POLLERR; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* Connection-based need to check for termination and startup */ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE) mask |= POLLHUP; /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets. */ if (unix_writable(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; return mask; } static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk, *other; unsigned int mask, writable; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* Connection-based need to check for termination and startup */ if (sk->sk_type == SOCK_SEQPACKET) { if (sk->sk_state == TCP_CLOSE) mask |= POLLHUP; /* connection hasn't started yet? */ if (sk->sk_state == TCP_SYN_SENT) return mask; } /* No write status requested, avoid expensive OUT tests. */ if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) return mask; writable = unix_writable(sk); if (writable) { unix_state_lock(sk); other = unix_peer(sk); if (other && unix_peer(other) != sk && unix_recvq_full(other) && unix_dgram_peer_wake_me(sk, other)) writable = 0; unix_state_unlock(sk); } if (writable) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } #ifdef CONFIG_PROC_FS #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) { unsigned long offset = get_offset(*pos); unsigned long bucket = get_bucket(*pos); struct sock *sk; unsigned long count = 0; for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) { if (sock_net(sk) != seq_file_net(seq)) continue; if (++count == offset) break; } return sk; } static struct sock *unix_next_socket(struct seq_file *seq, struct sock *sk, loff_t *pos) { unsigned long bucket; while (sk > (struct sock *)SEQ_START_TOKEN) { sk = sk_next(sk); if (!sk) goto next_bucket; if (sock_net(sk) == seq_file_net(seq)) return sk; } do { sk = unix_from_bucket(seq, pos); if (sk) return sk; next_bucket: bucket = get_bucket(*pos) + 1; *pos = set_bucket_offset(bucket, 1); } while (bucket < ARRAY_SIZE(unix_socket_table)); return NULL; } static void *unix_seq_start(struct seq_file *seq, loff_t *pos) __acquires(unix_table_lock) { spin_lock(&unix_table_lock); if (!*pos) return SEQ_START_TOKEN; if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table)) return NULL; return unix_next_socket(seq, NULL, pos); } static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return unix_next_socket(seq, v, pos); } static void unix_seq_stop(struct 
seq_file *seq, void *v) __releases(unix_table_lock) { spin_unlock(&unix_table_lock); } static int unix_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Num RefCount Protocol Flags Type St " "Inode Path\n"); else { struct sock *s = v; struct unix_sock *u = unix_sk(s); unix_state_lock(s); seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", s, atomic_read(&s->sk_refcnt), 0, s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, s->sk_type, s->sk_socket ? (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) : (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); if (u->addr) { int i, len; seq_putc(seq, ' '); i = 0; len = u->addr->len - sizeof(short); if (!UNIX_ABSTRACT(s)) len--; else { seq_putc(seq, '@'); i++; } for ( ; i < len; i++) seq_putc(seq, u->addr->name->sun_path[i]); } unix_state_unlock(s); seq_putc(seq, '\n'); } return 0; } static const struct seq_operations unix_seq_ops = { .start = unix_seq_start, .next = unix_seq_next, .stop = unix_seq_stop, .show = unix_seq_show, }; static int unix_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &unix_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations unix_seq_fops = { .owner = THIS_MODULE, .open = unix_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static const struct net_proto_family unix_family_ops = { .family = PF_UNIX, .create = unix_create, .owner = THIS_MODULE, }; static int __net_init unix_net_init(struct net *net) { int error = -ENOMEM; net->unx.sysctl_max_dgram_qlen = 10; if (unix_sysctl_register(net)) goto out; #ifdef CONFIG_PROC_FS if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) { unix_sysctl_unregister(net); goto out; } #endif error = 0; out: return error; } static void __net_exit unix_net_exit(struct net *net) { unix_sysctl_unregister(net); remove_proc_entry("unix", net->proc_net); } static struct pernet_operations unix_net_ops = { .init = unix_net_init, .exit = unix_net_exit, }; static int __init af_unix_init(void) { int rc = -1; BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb)); rc = proto_register(&unix_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); goto out; } sock_register(&unix_family_ops); register_pernet_subsys(&unix_net_ops); out: return rc; } static void __exit af_unix_exit(void) { sock_unregister(PF_UNIX); proto_unregister(&unix_proto); unregister_pernet_subsys(&unix_net_ops); } /* Earlier than device_initcall() so that other drivers invoking request_module() don't end up in a loop when modprobe tries to use a UNIX socket. But later than subsys_initcall() because we depend on stuff initialised there */ fs_initcall(af_unix_init); module_exit(af_unix_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_UNIX);
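/* Illustrative sketch (not part of this file): binding in the "abstract"
 * namespace described in the header comment at the top of this file. The
 * name starts with a NUL byte and is length-delimited rather than
 * NUL-terminated, so the address length handed to bind() must cover exactly
 * the bytes that belong to the name (assumes <sys/socket.h>, <sys/un.h> and
 * <stddef.h>):
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	socklen_t len = offsetof(struct sockaddr_un, sun_path) + 17;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memcpy(sun.sun_path, "\0example-abstract", 17);
 *	bind(fd, (struct sockaddr *)&sun, len);
 *
 * Such a name never touches the filesystem (unix_mkname() hashes it instead
 * of NUL-terminating it) and disappears automatically when the last
 * reference to the socket goes away.
 */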
./CrossVul/dataset_final_sorted/CWE-399/c/good_4966_3
crossvul-cpp_data_bad_2289_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2002 Intel Corp. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement the state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * C. Robin <chris@hundredacre.ac.uk> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/random.h> /* for get_random_bytes */ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen); static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len); static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp); static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data); static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); /* Control chunk destructor */ static void sctp_control_release_owner(struct sk_buff *skb) { /*TODO: do memory release */ } static void sctp_control_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sk_buff *skb = chunk->skb; /* TODO: properly account for control chunks. * To do it right we'll need: * 1) endpoint if association isn't known. * 2) proper memory accounting. * * For now don't do anything for now. */ skb->sk = asoc ? asoc->base.sk : NULL; skb->destructor = sctp_control_release_owner; } /* What was the inbound interface for this chunk? 
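 * The lookup below simply asks the address family that matches the IP
 * version of the received skb for its incoming interface index, and
 * returns 0 if the IP version is not recognised.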
*/ int sctp_chunk_iif(const struct sctp_chunk *chunk) { struct sctp_af *af; int iif = 0; af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); return iif; } /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 2: The ECN capable field is reserved for future use of * Explicit Congestion Notification. */ static const struct sctp_paramhdr ecap_param = { SCTP_PARAM_ECN_CAPABLE, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; static const struct sctp_paramhdr prsctp_param = { SCTP_PARAM_FWD_TSN_SUPPORT, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); } /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. Differs from sctp_init_cause in that it won't oops * if there isn't enough space in the op error chunk */ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); if (skb_tailroom(chunk->skb) < len) return -ENOSPC; chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(sctp_errhdr_t), &err); return 0; } /* 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two * endpoints. The format of the INIT chunk is shown below: * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initiate Tag | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Advertised Receiver Window Credit (a_rwnd) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Outbound Streams | Number of Inbound Streams | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initial TSN | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / Optional/Variable-Length Parameters / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * The INIT chunk contains the following parameters. Unless otherwise * noted, each parameter MUST only be included once in the INIT chunk. 
* * Fixed Parameters Status * ---------------------------------------------- * Initiate Tag Mandatory * Advertised Receiver Window Credit Mandatory * Number of Outbound Streams Mandatory * Number of Inbound Streams Mandatory * Initial TSN Mandatory * * Variable Parameters Status Type Value * ------------------------------------------------------------- * IPv4 Address (Note 1) Optional 5 * IPv6 Address (Note 1) Optional 6 * Cookie Preservative Optional 9 * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) * Host Name Address (Note 3) Optional 11 * Supported Address Types (Note 4) Optional 12 */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, gfp_t gfp, int vparam_len) { struct net *net = sock_net(asoc->base.sk); struct sctp_endpoint *ep = asoc->ep; sctp_inithdr_t init; union sctp_params addrs; size_t chunksize; struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; struct sctp_sock *sp; sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 1: The INIT chunks can contain multiple addresses that * can be IPv4 and/or IPv6 in any combination. */ retval = NULL; /* Convert the provided bind address list to raw format. */ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); init.init_tag = htonl(asoc->c.my_vtag); init.a_rwnd = htonl(asoc->rwnd); init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); init.initial_tsn = htonl(asoc->c.initial_tsn); /* How many address types are needed? */ sp = sctp_sk(asoc->base.sk); num_types = sp->pf->supported_addrs(sp, types); chunksize = sizeof(init) + addrs_len; chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); chunksize += sizeof(ecap_param); if (net->sctp.prsctp_enable) chunksize += sizeof(prsctp_param); /* ADDIP: Section 4.2.7: * An implementation supporting this extension [ADDIP] MUST list * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and * INIT-ACK parameters. */ if (net->sctp.addip_enable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); chunksize += vparam_len; /* Account for AUTH related parameters */ if (ep->auth_enable) { /* Add random parameter length*/ chunksize += sizeof(asoc->c.auth_random); /* Add HMACS parameter length if any were defined */ auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; /* Add CHUNKS parameter length */ auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } /* If we have any extensions to report, account for that */ if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host * Name address parameter. Moreover, the sender of the INIT * MUST NOT combine any other address types with the Host Name * address in the INIT. The receiver of INIT MUST ignore any * other address types if the Host Name address parameter is * present in the received INIT chunk. 
* * PLEASE DO NOT FIXME [This version does not support Host Name.] */ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize); if (!retval) goto nodata; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(init), &init); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 4: This parameter, when present, specifies all the * address types the sending endpoint can support. The absence * of this parameter indicates that the sending endpoint can * support any address type. */ sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES; sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types)); sctp_addto_chunk(retval, sizeof(sat), &sat); sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); /* Add the supported extensions parameter. Be nice and add this * fist before addiding the parameters for the extensions themselves */ if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (net->sctp.prsctp_enable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } /* Add SCTP-AUTH chunks to the parameter list */ if (ep->auth_enable) { sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), asoc->c.auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } nodata: kfree(addrs.v); return retval; } struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, gfp_t gfp, int unkparam_len) { sctp_inithdr_t initack; struct sctp_chunk *retval; union sctp_params addrs; struct sctp_sock *sp; int addrs_len; sctp_cookie_param_t *cookie; int cookie_len; size_t chunksize; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL, *auth_random = NULL; retval = NULL; /* Note: there may be no addresses to embed. */ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); initack.init_tag = htonl(asoc->c.my_vtag); initack.a_rwnd = htonl(asoc->rwnd); initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams); initack.initial_tsn = htonl(asoc->c.initial_tsn); /* FIXME: We really ought to build the cookie right * into the packet instead of allocating more fresh memory. */ cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len, addrs.v, addrs_len); if (!cookie) goto nomem_cookie; /* Calculate the total size of allocation, include the reserved * space for reporting unknown parameters if it is specified. */ sp = sctp_sk(asoc->base.sk); chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len; /* Tell peer that we'll do ECN only if peer advertised such cap. 
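 * The same pattern is used for the remaining optional parameters below:
 * PR-SCTP, ASCONF/ASCONF-ACK and AUTH are only accounted for (and later
 * appended) when the corresponding peer capability was seen in the INIT.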
*/ if (asoc->peer.ecn_capable) chunksize += sizeof(ecap_param); if (asoc->peer.prsctp_capable) chunksize += sizeof(prsctp_param); if (asoc->peer.asconf_capable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); if (asoc->peer.auth_capable) { auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; chunksize += ntohs(auth_random->length); auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* Now allocate and fill out the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) goto nomem_chunk; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * * [INIT ACK back to where the INIT came from.] */ retval->transport = chunk->transport; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } if (asoc->peer.auth_capable) { sctp_addto_chunk(retval, ntohs(auth_random->length), auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; nomem_chunk: kfree(cookie); nomem_cookie: kfree(addrs.v); return retval; } /* 3.3.11 Cookie Echo (COOKIE ECHO) (10): * * This chunk is used only during the initialization of an association. * It is sent by the initiator of an association to its peer to complete * the initialization process. This chunk MUST precede any DATA chunk * sent within the association, but MAY be bundled with one or more DATA * chunks in the same packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 10 |Chunk Flags | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * / Cookie / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bit * * Set to zero on transmit and ignored on receipt. 
* * Length: 16 bits (unsigned integer) * * Set to the size of the chunk in bytes, including the 4 bytes of * the chunk header and the size of the Cookie. * * Cookie: variable size * * This field must contain the exact cookie received in the * State Cookie parameter from the previous INIT ACK. * * An implementation SHOULD make the cookie as small as possible * to insure interoperability. */ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; void *cookie; int cookie_len; cookie = asoc->peer.cookie; cookie_len = asoc->peer.cookie_len; /* Build a cookie echo chunk. */ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); if (!retval) goto nodata; retval->subh.cookie_hdr = sctp_addto_chunk(retval, cookie_len, cookie); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ECHO back to where the INIT ACK came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): * * This chunk is used only during the initialization of an * association. It is used to acknowledge the receipt of a COOKIE * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent * within the association, but MAY be bundled with one or more DATA * chunks or SACK chunk in the same SCTP packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 11 |Chunk Flags | Length = 4 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bits * * Set to zero on transmit and ignored on receipt. */ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ACK back to where the COOKIE ECHO came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* * Appendix A: Explicit Congestion Notification: * CWR: * * RFC 2481 details a specific bit for a sender to send in the header of * its next outbound TCP segment to indicate to its peer that it has * reduced its congestion window. This is termed the CWR bit. For * SCTP the same indication is made by including the CWR chunk. * This chunk contains one data element, i.e. the TSN number that * was sent in the ECNE chunk. This element represents the lowest * TSN number in the datagram that was originally marked with the * CE bit. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Lowest TSN Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Note: The CWR is considered a Control chunk. 
*/ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_cwrhdr_t cwr; cwr.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, sizeof(sctp_cwrhdr_t)); if (!retval) goto nodata; retval->subh.ecn_cwr_hdr = sctp_addto_chunk(retval, sizeof(cwr), &cwr); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report a reduced congestion window back to where the ECNE * came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Make an ECNE chunk. This is a congestion experienced report. */ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn) { struct sctp_chunk *retval; sctp_ecnehdr_t ecne; ecne.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, sizeof(sctp_ecnehdr_t)); if (!retval) goto nodata; retval->subh.ecne_hdr = sctp_addto_chunk(retval, sizeof(ecne), &ecne); nodata: return retval; } /* Make a DATA chunk for the given association from the provided * parameters. However, do not populate the data payload. */ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int data_len, __u8 flags, __u16 ssn) { struct sctp_chunk *retval; struct sctp_datahdr dp; int chunk_len; /* We assign the TSN as LATE as possible, not here when * creating the chunk. */ dp.tsn = 0; dp.stream = htons(sinfo->sinfo_stream); dp.ppid = sinfo->sinfo_ppid; /* Set the flags for an unordered send. */ if (sinfo->sinfo_flags & SCTP_UNORDERED) { flags |= SCTP_DATA_UNORDERED; dp.ssn = 0; } else dp.ssn = htons(ssn); chunk_len = sizeof(dp) + data_len; retval = sctp_make_data(asoc, flags, chunk_len); if (!retval) goto nodata; retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); nodata: return retval; } /* Create a selective ackowledgement (SACK) for the given * association. This reports on which TSN's we've seen to date, * including duplicates and gaps. */ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_sackhdr sack; int len; __u32 ctsn; __u16 num_gabs, num_dup_tsns; struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; struct sctp_transport *trans; memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); /* How much room is needed in the chunk? */ num_gabs = sctp_tsnmap_num_gabs(map, gabs); num_dup_tsns = sctp_tsnmap_num_dups(map); /* Initialize the SACK header. */ sack.cum_tsn_ack = htonl(ctsn); sack.a_rwnd = htonl(asoc->a_rwnd); sack.num_gap_ack_blocks = htons(num_gabs); sack.num_dup_tsns = htons(num_dup_tsns); len = sizeof(sack) + sizeof(struct sctp_gap_ack_block) * num_gabs + sizeof(__u32) * num_dup_tsns; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) 
to the same destination transport * address from which it received the DATA or control chunk to * which it is replying. This rule should also be followed if * the endpoint is bundling DATA chunks together with the * reply chunk. * * However, when acknowledging multiple DATA chunks received * in packets from different source addresses in a single * SACK, the SACK chunk may be transmitted to one of the * destination transport addresses from which the DATA or * control chunks being acknowledged were received. * * [BUG: We do not implement the following paragraph. * Perhaps we should remember the last transport we used for a * SACK and avoid that (if possible) if we have seen any * duplicates. --piggy] * * When a receiver of a duplicate DATA chunk sends a SACK to a * multi- homed endpoint it MAY be beneficial to vary the * destination address and not use the source address of the * DATA chunk. The reason being that receiving a duplicate * from a multi-homed endpoint might indicate that the return * path (as specified in the source address of the DATA chunk) * for the SACK is broken. * * [Send to the address from which we last received a DATA chunk.] */ retval->transport = asoc->peer.last_data_from; retval->subh.sack_hdr = sctp_addto_chunk(retval, sizeof(sack), &sack); /* Add the gap ack block information. */ if (num_gabs) sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, gabs); /* Add the duplicate TSN information. */ if (num_dup_tsns) { aptr->stats.idupchunks += num_dup_tsns; sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, sctp_tsnmap_get_dups(map)); } /* Once we have a sack generated, check to see what our sack * generation is, if its 0, reset the transports to 0, and reset * the association generation to 1 * * The idea is that zero is never used as a valid generation for the * association so no transport will match after a wrap event like this, * Until the next sack */ if (++aptr->peer.sack_generation == 0) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) trans->sack_generation = 0; aptr->peer.sack_generation = 1; } nodata: return retval; } /* Make a SHUTDOWN chunk. */ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_shutdownhdr_t shut; __u32 ctsn; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, sizeof(sctp_shutdownhdr_t)); if (!retval) goto nodata; retval->subh.shutdown_hdr = sctp_addto_chunk(retval, sizeof(shut), &shut); if (chunk) retval->transport = chunk->transport; nodata: return retval; } struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ACK back to where the SHUTDOWN came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } struct sctp_chunk *sctp_make_shutdown_complete( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association (vtag will be * reflected) */ flags |= asoc ? 
0 : SCTP_CHUNK_FLAG_T; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK * came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Create an ABORT. Note that we set the T bit if we have no * association, except when responding to an INIT (sctpimpguide 2.41). */ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const size_t hint) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association and 'chunk' is not * an INIT (vtag will be reflected). */ if (!asoc) { if (chunk && chunk->chunk_hdr && chunk->chunk_hdr->type == SCTP_CID_INIT) flags = 0; else flags = SCTP_CHUNK_FLAG_T; } retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Helper to create ABORT with a NO_USER_DATA error. */ struct sctp_chunk *sctp_make_abort_no_data( const struct sctp_association *asoc, const struct sctp_chunk *chunk, __u32 tsn) { struct sctp_chunk *retval; __be32 payload; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + sizeof(tsn)); if (!retval) goto no_mem; /* Put the tsn back into network byte order. */ payload = htonl(tsn); sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (chunk) retval->transport = chunk->transport; no_mem: return retval; } /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, const struct msghdr *msg, size_t paylen) { struct sctp_chunk *retval; void *payload = NULL; int err; retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ payload = kmalloc(paylen, GFP_KERNEL); if (!payload) goto err_payload; err = memcpy_fromiovec(payload, msg->msg_iov, paylen); if (err < 0) goto err_copy; } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); sctp_addto_chunk(retval, paylen, payload); if (paylen) kfree(payload); return retval; err_copy: kfree(payload); err_payload: sctp_chunk_free(retval); retval = NULL; err_chunk: return retval; } /* Append bytes to the end of a parameter. Will panic if chunk is not big * enough. */ static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) { void *target; int chunklen = ntohs(chunk->chunk_hdr->length); target = skb_put(chunk->skb, len); if (data) memcpy(target, data, len); else memset(target, 0, len); /* Adjust the chunk length field. 
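 * Unlike sctp_addto_chunk(), no padding bytes are inserted here: the data
 * (or zeroes, when data is NULL) is appended as-is and the chunk length
 * grows by exactly 'len'.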
*/ chunk->chunk_hdr->length = htons(chunklen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */ struct sctp_chunk *sctp_make_abort_violation( const struct sctp_association *asoc, const struct sctp_chunk *chunk, const __u8 *payload, const size_t paylen) { struct sctp_chunk *retval; struct sctp_paramhdr phdr; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen + sizeof(sctp_paramhdr_t)); if (!retval) goto end; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen + sizeof(sctp_paramhdr_t)); phdr.type = htons(chunk->chunk_hdr->type); phdr.length = chunk->chunk_hdr->length; sctp_addto_chunk(retval, paylen, payload); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); end: return retval; } struct sctp_chunk *sctp_make_violation_paramlen( const struct sctp_association *asoc, const struct sctp_chunk *chunk, struct sctp_paramhdr *param) { struct sctp_chunk *retval; static const char error[] = "The following parameter had invalid length:"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + sizeof(sctp_paramhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error) + sizeof(sctp_paramhdr_t)); sctp_addto_chunk(retval, sizeof(error), error); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); nodata: return retval; } struct sctp_chunk *sctp_make_violation_max_retrans( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; static const char error[] = "Association exceeded its max_retans count"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error)); sctp_addto_chunk(retval, sizeof(error), error); nodata: return retval; } /* Make a HEARTBEAT chunk. */ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport) { struct sctp_chunk *retval; sctp_sender_hb_info_t hbinfo; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); if (!retval) goto nodata; hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; hbinfo.hb_nonce = transport->hb_nonce; /* Cast away the 'const', as this is just telling the chunk * what transport it belongs to. */ retval->transport = (struct sctp_transport *) transport; retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo), &hbinfo); nodata: return retval; } struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const void *payload, const size_t paylen) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); if (!retval) goto nodata; retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [HBACK back to where the HEARTBEAT came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk with the specified space reserved. 
* This routine can be used for containing multiple causes in the chunk. */ static struct sctp_chunk *sctp_make_op_error_space( const struct sctp_association *asoc, const struct sctp_chunk *chunk, size_t size) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, sizeof(sctp_errhdr_t) + size); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk of a fixed size, * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) * This is a helper function to allocate an error chunk for * for those invalid parameter codes in which we may not want * to report all the errors, if the incoming chunk is large */ static inline struct sctp_chunk *sctp_make_op_error_fixed( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { size_t size = asoc ? asoc->pathmtu : 0; if (!size) size = SCTP_DEFAULT_MAXSEGMENT; return sctp_make_op_error_space(asoc, chunk, size); } /* Create an Operation Error chunk. */ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, size_t paylen, size_t reserve_tail) { struct sctp_chunk *retval; retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail); if (!retval) goto nodata; sctp_init_cause(retval, cause_code, paylen + reserve_tail); sctp_addto_chunk(retval, paylen, payload); if (reserve_tail) sctp_addto_param(retval, reserve_tail, NULL); nodata: return retval; } struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_hmac *hmac_desc; struct sctp_authhdr auth_hdr; __u8 *hmac; /* Get the first hmac that the peer told us to use */ hmac_desc = sctp_auth_asoc_get_hmac(asoc); if (unlikely(!hmac_desc)) return NULL; retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); if (!retval) return NULL; auth_hdr.hmac_id = htons(hmac_desc->hmac_id); auth_hdr.shkey_id = htons(asoc->active_key_id); retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), &auth_hdr); hmac = skb_put(retval->skb, hmac_desc->hmac_len); memset(hmac, 0, hmac_desc->hmac_len); /* Adjust the chunk header to include the empty MAC */ retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); retval->chunk_end = skb_tail_pointer(retval->skb); return retval; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* Turn an skb into a chunk. * FIXME: Eventually move the structure directly inside the skb->cb[]. * * sctpimpguide-05.txt Section 2.8.2 * M1) Each time a new DATA chunk is transmitted * set the 'TSN.Missing.Report' count for that TSN to 0. The * 'TSN.Missing.Report' count will be used to determine missing chunks * and when to fast retransmit. 
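 * sctp_chunkify() below wraps an existing skb in a struct sctp_chunk without
 * copying the data; on allocation failure it returns NULL and leaves the skb
 * for the caller (see _sctp_make_chunk()) to free.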
* */ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, const struct sctp_association *asoc, struct sock *sk) { struct sctp_chunk *retval; retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); if (!retval) goto nodata; if (!sk) pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); INIT_LIST_HEAD(&retval->list); retval->skb = skb; retval->asoc = (struct sctp_association *)asoc; retval->singleton = 1; retval->fast_retransmit = SCTP_CAN_FRTX; /* Polish the bead hole. */ INIT_LIST_HEAD(&retval->transmitted_list); INIT_LIST_HEAD(&retval->frag_list); SCTP_DBG_OBJCNT_INC(chunk); atomic_set(&retval->refcnt, 1); nodata: return retval; } /* Set chunk->source and dest based on the IP header in chunk->skb. */ void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, union sctp_addr *dest) { memcpy(&chunk->source, src, sizeof(union sctp_addr)); memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); } /* Extract the source address from a chunk. */ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) { /* If we have a known transport, use that. */ if (chunk->transport) { return &chunk->transport->ipaddr; } else { /* Otherwise, extract it from the IP header. */ return &chunk->source; } } /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; struct sk_buff *skb; struct sock *sk; /* No need to allocate LL here, as this is only a chunk. */ skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), GFP_ATOMIC); if (!skb) goto nodata; /* Make room for the chunk header. */ chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); chunk_hdr->type = type; chunk_hdr->flags = flags; chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); sk = asoc ? asoc->base.sk : NULL; retval = sctp_chunkify(skb, asoc, sk); if (!retval) { kfree_skb(skb); goto nodata; } retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); /* Determine if the chunk needs to be authenticated */ if (sctp_auth_send_cid(type, asoc)) retval->auth = 1; return retval; nodata: return NULL; } static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen) { return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); } static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); if (chunk) sctp_control_set_owner_w(chunk); return chunk; } /* Release the memory occupied by a chunk. */ static void sctp_chunk_destroy(struct sctp_chunk *chunk) { BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); consume_skb(chunk->skb); consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); } /* Possibly, free the chunk. */ void sctp_chunk_free(struct sctp_chunk *chunk) { /* Release our reference on the message tracker. */ if (chunk->msg) sctp_datamsg_put(chunk->msg); sctp_chunk_put(chunk); } /* Grab a reference to the chunk. */ void sctp_chunk_hold(struct sctp_chunk *ch) { atomic_inc(&ch->refcnt); } /* Release a reference to the chunk. */ void sctp_chunk_put(struct sctp_chunk *ch) { if (atomic_dec_and_test(&ch->refcnt)) sctp_chunk_destroy(ch); } /* Append bytes to the end of a chunk. 
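 * The existing contents are first padded with zeroes up to a 4-byte
 * boundary; appending 6 bytes to a 13-byte chunk, for example, writes 3
 * padding bytes followed by the data and leaves a chunk length of 22.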
 * Will panic if chunk is not big enough.
 */
void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
{
	void *target;
	void *padding;
	int chunklen = ntohs(chunk->chunk_hdr->length);
	int padlen = WORD_ROUND(chunklen) - chunklen;

	padding = skb_put(chunk->skb, padlen);
	target = skb_put(chunk->skb, len);

	memset(padding, 0, padlen);
	memcpy(target, data, len);

	/* Adjust the chunk length field. */
	chunk->chunk_hdr->length = htons(chunklen + padlen + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

	return target;
}

/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
 * space in the chunk
 */
static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
				    int len, const void *data)
{
	if (skb_tailroom(chunk->skb) >= len)
		return sctp_addto_chunk(chunk, len, data);
	else
		return NULL;
}

/* Append bytes from user space to the end of a chunk. Will panic if
 * chunk is not big enough.
 * Returns a kernel err value.
 */
int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
			  struct iovec *data)
{
	__u8 *target;
	int err = 0;

	/* Make room in chunk for data. */
	target = skb_put(chunk->skb, len);

	/* Copy data (whole iovec) into chunk */
	if ((err = memcpy_fromiovecend(target, data, off, len)))
		goto out;

	/* Adjust the chunk length field. */
	chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

out:
	return err;
}

/* Helper function to assign an SSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 */
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
	struct sctp_datamsg *msg;
	struct sctp_chunk *lchunk;
	struct sctp_stream *stream;
	__u16 ssn;
	__u16 sid;

	if (chunk->has_ssn)
		return;

	/* All fragments will be on the same stream */
	sid = ntohs(chunk->subh.data_hdr->stream);
	stream = &chunk->asoc->ssnmap->out;

	/* Now assign the sequence number to the entire message.
	 * All fragments must have the same stream sequence number.
	 */
	msg = chunk->msg;
	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			ssn = 0;
		} else {
			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
				ssn = sctp_ssn_next(stream, sid);
			else
				ssn = sctp_ssn_peek(stream, sid);
		}

		lchunk->subh.data_hdr->ssn = htons(ssn);
		lchunk->has_ssn = 1;
	}
}

/* Helper function to assign a TSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 */
void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
{
	if (!chunk->has_tsn) {
		/* This is the last possible instant to
		 * assign a TSN.
		 */
		chunk->subh.data_hdr->tsn =
			htonl(sctp_association_get_next_tsn(chunk->asoc));
		chunk->has_tsn = 1;
	}
}

/* Create a CLOSED association to use with an incoming packet. */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
					     struct sctp_chunk *chunk,
					     gfp_t gfp)
{
	struct sctp_association *asoc;
	struct sk_buff *skb;
	sctp_scope_t scope;
	struct sctp_af *af;

	/* Create the bare association.  */
	scope = sctp_scope(sctp_source(chunk));
	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!asoc)
		goto nodata;
	asoc->temp = 1;
	skb = chunk->skb;

	/* Create an entry for the source address of the packet.  */
	af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version));
	if (unlikely(!af))
		goto fail;
	af->from_skb(&asoc->c.peer_addr, skb, 1);
nodata:
	return asoc;

fail:
	sctp_association_free(asoc);
	return NULL;
}

/* Build a cookie representing asoc.
 * This INCLUDES the param header needed to put the cookie in the INIT ACK.
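 * The body carries a copy of struct sctp_cookie, the peer's entire INIT
 * chunk and the raw local address list, padded to a multiple of
 * SCTP_COOKIE_MULTIPLE and, when the socket has an HMAC transform, signed
 * with the endpoint's secret key.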
*/ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len) { sctp_cookie_param_t *retval; struct sctp_signed_cookie *cookie; struct scatterlist sg; int headersize, bodysize; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_paramhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = sizeof(struct sctp_cookie) + ntohs(init_chunk->chunk_hdr->length) + addrs_len; /* Pad out the cookie to a multiple to make the signature * functions simpler to write. */ if (bodysize % SCTP_COOKIE_MULTIPLE) bodysize += SCTP_COOKIE_MULTIPLE - (bodysize % SCTP_COOKIE_MULTIPLE); *cookie_len = headersize + bodysize; /* Clear this memory since we are sending this data structure * out on the network. */ retval = kzalloc(*cookie_len, GFP_ATOMIC); if (!retval) goto nodata; cookie = (struct sctp_signed_cookie *) retval->body; /* Set up the parameter header. */ retval->p.type = SCTP_PARAM_STATE_COOKIE; retval->p.length = htons(*cookie_len); /* Copy the cookie part of the association itself. */ cookie->c = asoc->c; /* Save the raw address list length in the cookie. */ cookie->c.raw_addr_list_len = addrs_len; /* Remember PR-SCTP capability. */ cookie->c.prsctp_capable = asoc->peer.prsctp_capable; /* Save adaptation indication in the cookie. */ cookie->c.adaptation_ind = asoc->peer.adaptation_ind; /* Set an expiration time for the cookie. */ cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get()); /* Copy the peer's init packet. */ memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, ntohs(init_chunk->chunk_hdr->length)); /* Copy the raw local address list of the association. */ memcpy((__u8 *)&cookie->c.peer_init[0] + ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); if (sctp_sk(ep->base.sk)->hmac) { struct hash_desc desc; /* Sign the message. */ sg_init_one(&sg, &cookie->c, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) goto free_cookie; } return retval; free_cookie: kfree(retval); nodata: *cookie_len = 0; return NULL; } /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ struct sctp_association *sctp_unpack_cookie( const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp, int *error, struct sctp_chunk **errp) { struct sctp_association *retval = NULL; struct sctp_signed_cookie *cookie; struct sctp_cookie *bear_cookie; int headersize, bodysize, fixed_size; __u8 *digest = ep->digest; struct scatterlist sg; unsigned int len; sctp_scope_t scope; struct sk_buff *skb = chunk->skb; ktime_t kt; struct hash_desc desc; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_chunkhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = ntohs(chunk->chunk_hdr->length) - headersize; fixed_size = headersize + sizeof(struct sctp_cookie); /* Verify that the chunk looks like it even has a cookie. * There must be enough room for our cookie and our peer's * INIT chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len < fixed_size + sizeof(struct sctp_chunkhdr)) goto malformed; /* Verify that the cookie has been padded out. 
*/ if (bodysize % SCTP_COOKIE_MULTIPLE) goto malformed; /* Process the cookie. */ cookie = chunk->subh.cookie_hdr; bear_cookie = &cookie->c; if (!sctp_sk(ep->base.sk)->hmac) goto no_hmac; /* Check the signature. */ sg_init_one(&sg, bear_cookie, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; memset(digest, 0x00, SCTP_SIGNATURE_SIZE); if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, digest)) { *error = -SCTP_IERROR_NOMEM; goto fail; } if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { *error = -SCTP_IERROR_BAD_SIG; goto fail; } no_hmac: /* IG Section 2.35.2: * 3) Compare the port numbers and the verification tag contained * within the COOKIE ECHO chunk to the actual port numbers and the * verification tag within the SCTP common header of the received * packet. If these values do not match the packet MUST be silently * discarded, */ if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) { *error = -SCTP_IERROR_BAD_TAG; goto fail; } if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port || ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { *error = -SCTP_IERROR_BAD_PORTS; goto fail; } /* Check to see if the cookie is stale. If there is already * an association, there is no need to check cookie's expiration * for init collision case of lost COOKIE ACK. * If skb has been timestamped, then use the stamp, otherwise * use current time. This introduces a small possibility that * that a cookie may be considered expired, but his would only slow * down the new association establishment instead of every packet. */ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) kt = skb_get_ktime(skb); else kt = ktime_get(); if (!asoc && ktime_before(bear_cookie->expiration, kt)) { /* * Section 3.3.10.3 Stale Cookie Error (3) * * Cause of error * --------------- * Stale Cookie Error: Indicates the receipt of a valid State * Cookie that has expired. */ len = ntohs(chunk->chunk_hdr->length); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); __be32 n = htonl(usecs); sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, sizeof(n)); sctp_addto_chunk(*errp, sizeof(n), &n); *error = -SCTP_IERROR_STALE_COOKIE; } else *error = -SCTP_IERROR_NOMEM; goto fail; } /* Make a new base association. */ scope = sctp_scope(sctp_source(chunk)); retval = sctp_association_new(ep, ep->base.sk, scope, gfp); if (!retval) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Set up our peer's port number. */ retval->peer.port = ntohs(chunk->sctp_hdr->source); /* Populate the association from the cookie. */ memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie, GFP_ATOMIC) < 0) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Also, add the destination address. */ if (list_empty(&retval->base.bind_addr.address_list)) { sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, SCTP_ADDR_SRC, GFP_ATOMIC); } retval->next_tsn = retval->c.initial_tsn; retval->ctsn_ack_point = retval->next_tsn - 1; retval->addip_serial = retval->c.initial_tsn; retval->adv_peer_ack_point = retval->ctsn_ack_point; retval->peer.prsctp_capable = retval->c.prsctp_capable; retval->peer.adaptation_ind = retval->c.adaptation_ind; /* The INIT stuff will be done by the side effects. */ return retval; fail: if (retval) sctp_association_free(retval); return NULL; malformed: /* Yikes! 
The packet is either corrupt or deliberately * malformed. */ *error = -SCTP_IERROR_MALFORMED; goto fail; } /******************************************************************** * 3rd Level Abstractions ********************************************************************/ struct __sctp_missing { __be32 num_missing; __be16 type; } __packed; /* * Report a missing mandatory parameter. */ static int sctp_process_missing_param(const struct sctp_association *asoc, sctp_param_t paramtype, struct sctp_chunk *chunk, struct sctp_chunk **errp) { struct __sctp_missing report; __u16 len; len = WORD_ROUND(sizeof(report)); /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { report.num_missing = htonl(1); report.type = paramtype; sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, sizeof(report)); sctp_addto_chunk(*errp, sizeof(report), &report); } /* Stop processing this chunk. */ return 0; } /* Report an Invalid Mandatory Parameter. */ static int sctp_process_inv_mandatory(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* Invalid Mandatory Parameter Error has no payload. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, 0); if (*errp) sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); /* Stop processing this chunk. */ return 0; } static int sctp_process_inv_paramlength(const struct sctp_association *asoc, struct sctp_paramhdr *param, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* This is a fatal error. Any accumulated non-fatal errors are * not reported. */ if (*errp) sctp_chunk_free(*errp); /* Create an error chunk and fill it in with our payload. */ *errp = sctp_make_violation_paramlen(asoc, chunk, param); return 0; } /* Do not attempt to handle the HOST_NAME parm. However, do * send back an indicator to the peer. */ static int sctp_process_hn_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { __u16 len = ntohs(param.p->length); /* Processing of the HOST_NAME parameter will generate an * ABORT. If we've accumulated any non-fatal errors, they * would be unrecognized parameters and we should not include * them in the ABORT. */ if (*errp) sctp_chunk_free(*errp); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len); sctp_addto_chunk(*errp, len, param.v); } /* Stop processing this chunk. */ return 0; } static int sctp_verify_ext_param(struct net *net, union sctp_params param) { __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int have_auth = 0; int have_asconf = 0; int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_AUTH: have_auth = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: have_asconf = 1; break; } } /* ADD-IP Security: The draft requires us to ABORT or ignore the * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this * only if ADD-IP is turned on and we are not backward-compatible * mode. 
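 * In other words: when addip_noauth is set every such INIT is accepted
 * here, otherwise an INIT listing ASCONF/ASCONF-ACK without AUTH is
 * rejected while ADD-IP is enabled.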
*/ if (net->sctp.addip_noauth) return 1; if (net->sctp.addip_enable && !have_auth && have_asconf) return 0; return 1; } static void sctp_process_ext_param(struct sctp_association *asoc, union sctp_params param) { struct net *net = sock_net(asoc->base.sk); __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_FWD_TSN: if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable) asoc->peer.prsctp_capable = 1; break; case SCTP_CID_AUTH: /* if the peer reports AUTH, assume that he * supports AUTH. */ if (asoc->ep->auth_enable) asoc->peer.auth_capable = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: if (net->sctp.addip_enable) asoc->peer.asconf_capable = 1; break; default: break; } } } /* RFC 3.2.1 & the Implementers Guide 2.2. * * The Parameter Types are encoded such that the * highest-order two bits specify the action that must be * taken if the processing endpoint does not recognize the * Parameter Type. * * 00 - Stop processing this parameter; do not process any further * parameters within this chunk * * 01 - Stop processing this parameter, do not process any further * parameters within this chunk, and report the unrecognized * parameter in an 'Unrecognized Parameter' ERROR chunk. * * 10 - Skip this parameter and continue processing. * * 11 - Skip this parameter and continue processing but * report the unrecognized parameter in an * 'Unrecognized Parameter' ERROR chunk. * * Return value: * SCTP_IERROR_NO_ERROR - continue with the chunk * SCTP_IERROR_ERROR - stop and report an error. * SCTP_IERROR_NOMEME - out of memory. */ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { int retval = SCTP_IERROR_NO_ERROR; switch (param.p->type & SCTP_PARAM_ACTION_MASK) { case SCTP_PARAM_ACTION_DISCARD: retval = SCTP_IERROR_ERROR; break; case SCTP_PARAM_ACTION_SKIP: break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; /* Fall through */ case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (NULL == *errp) *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, WORD_ROUND(ntohs(param.p->length)))) sctp_addto_chunk_fixed(*errp, WORD_ROUND(ntohs(param.p->length)), param.v); } else { /* If there is no memory for generating the ERROR * report as specified, an ABORT will be triggered * to the peer and the association won't be * established. */ retval = SCTP_IERROR_NOMEM; } break; default: break; } return retval; } /* Verify variable length parameters * Return values: * SCTP_IERROR_ABORT - trigger an ABORT * SCTP_IERROR_NOMEM - out of memory (abort) * SCTP_IERROR_ERROR - stop processing, trigger an ERROR * SCTP_IERROR_NO_ERROR - continue with the chunk */ static sctp_ierror_t sctp_verify_param(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, union sctp_params param, sctp_cid_t cid, struct sctp_chunk *chunk, struct sctp_chunk **err_chunk) { struct sctp_hmac_algo_param *hmacs; int retval = SCTP_IERROR_NO_ERROR; __u16 n_elt, id = 0; int i; /* FIXME - This routine is not looking at each parameter per the * chunk type, i.e., unrecognized parameters should be further * identified based on the chunk id. 
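 * Types without an explicit case below fall through to
 * sctp_process_unk_param(), which acts on the two high-order bits of the
 * parameter type as described earlier in this file.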
*/ switch (param.p->type) { case SCTP_PARAM_IPV4_ADDRESS: case SCTP_PARAM_IPV6_ADDRESS: case SCTP_PARAM_COOKIE_PRESERVATIVE: case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: case SCTP_PARAM_STATE_COOKIE: case SCTP_PARAM_HEARTBEAT_INFO: case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: case SCTP_PARAM_ECN_CAPABLE: case SCTP_PARAM_ADAPTATION_LAYER_IND: break; case SCTP_PARAM_SUPPORTED_EXT: if (!sctp_verify_ext_param(net, param)) return SCTP_IERROR_ABORT; break; case SCTP_PARAM_SET_PRIMARY: if (net->sctp.addip_enable) break; goto fallthrough; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ sctp_process_hn_param(asoc, param, chunk, err_chunk); retval = SCTP_IERROR_ABORT; break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) break; goto fallthrough; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Secion 6.1 * If the random number is not 32 byte long the association * MUST be aborted. The ABORT chunk SHOULD contain the error * cause 'Protocol Violation'. */ if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Section 3.2 * The CHUNKS parameter MUST be included once in the INIT or * INIT-ACK chunk if the sender wants to receive authenticated * chunks. Its maximum length is 260 bytes. */ if (260 < ntohs(param.p->length)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fallthrough; hmacs = (struct sctp_hmac_algo_param *)param.p; n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1; /* SCTP-AUTH: Section 6.1 * The HMAC algorithm based on SHA-1 MUST be supported and * included in the HMAC-ALGO parameter. */ for (i = 0; i < n_elt; i++) { id = ntohs(hmacs->hmac_ids[i]); if (id == SCTP_AUTH_HMAC_ID_SHA1) break; } if (id != SCTP_AUTH_HMAC_ID_SHA1) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; fallthrough: default: pr_debug("%s: unrecognized param:%d for chunk:%d\n", __func__, ntohs(param.p->type), cid); retval = sctp_process_unk_param(asoc, param, chunk, err_chunk); break; } return retval; } /* Verify the INIT packet before we process it. */ int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, sctp_cid_t cid, sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, struct sctp_chunk **errp) { union sctp_params param; bool has_cookie = false; int result; /* Check for missing mandatory parameters. Note: Initial TSN is * also mandatory, but is not checked here since the valid range * is 0..2**32-1. RFC4960, section 3.3.3. */ if (peer_init->init_hdr.num_outbound_streams == 0 || peer_init->init_hdr.num_inbound_streams == 0 || peer_init->init_hdr.init_tag == 0 || ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW) return sctp_process_inv_mandatory(asoc, chunk, errp); sctp_walk_params(param, peer_init, init_hdr.params) { if (param.p->type == SCTP_PARAM_STATE_COOKIE) has_cookie = true; } /* There is a possibility that a parameter length was bad and * in that case we would have stoped walking the parameters. * The current param.p would point at the bad one. * Current consensus on the mailing list is to generate a PROTOCOL * VIOLATION error. 
We build the ERROR chunk here and let the normal * error handling code build and send the packet. */ if (param.v != (void *)chunk->chunk_end) return sctp_process_inv_paramlength(asoc, param.p, chunk, errp); /* The only missing mandatory param possible today is * the state cookie for an INIT-ACK chunk. */ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE, chunk, errp); /* Verify all the variable length parameters */ sctp_walk_params(param, peer_init, init_hdr.params) { result = sctp_verify_param(net, ep, asoc, param, cid, chunk, errp); switch (result) { case SCTP_IERROR_ABORT: case SCTP_IERROR_NOMEM: return 0; case SCTP_IERROR_ERROR: return 1; case SCTP_IERROR_NO_ERROR: default: break; } } /* for (loop through all parameters) */ return 1; } /* Unpack the parameters in an INIT packet into an association. * Returns 0 on failure, else success. * FIXME: This is an association method. */ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, const union sctp_addr *peer_addr, sctp_init_chunk_t *peer_init, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; struct sctp_af *af; union sctp_addr addr; char *cookie; int src_match = 0; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. * When processing a COOKIE ECHO, we retrieve the from address * of the INIT from the cookie. */ /* This implementation defaults to making the first transport * added as the primary transport. The source address seems to * be a a better choice than any of the embedded addresses. */ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr)) src_match = 1; /* Process the initialization parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, chunk->sctp_hdr->source, 0); if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } /* source address of chunk may not match any valid address */ if (!src_match) goto clean_up; /* AUTH: After processing the parameters, make sure that we * have all the required info to potentially do authentications. */ if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; /* In a non-backward compatible mode, if the peer claims * support for ADD-IP but not AUTH, the ADD-IP spec states * that we MUST ABORT the association. Section 6. The section * also give us an option to silently ignore the packet, which * is what we'll do here. */ if (!net->sctp.addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } /* The fixed INIT headers are always in network byte * order. 
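 *
 * Editorial note (added, illustrative): everything below converts from
 * wire format with ntohl()/ntohs() before use, and the stream counts
 * are then clamped against what the peer can actually handle.  For
 * example, if we asked for 10 outbound streams and the peer's INIT
 * advertises 4 inbound streams, sinit_num_ostreams ends up as
 * min(10, 4) = 4; the inbound side is clamped against the peer's
 * outbound stream count in the same way.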
*/ asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); /* Apply the upper bounds for output streams based on peer's * number of inbound streams. */ if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } /* Copy Initiation tag from INIT to VT_peer in cookie. */ asoc->c.peer_vtag = asoc->peer.i.init_tag; /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; /* Copy cookie in case we need to resend COOKIE-ECHO. */ cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } /* Set up the TSN tracking pieces. */ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, gfp)) goto clean_up; /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * The stream sequence number in all the streams shall start * from 0 when the association is established. Also, when the * stream sequence number reaches the value 65535 the next * stream sequence number shall be set to 0. */ /* Allocate storage for the negotiated streams if it is not a temporary * association. */ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state != SCTP_ACTIVE) sctp_assoc_rm_peer(asoc, transport); } nomem: return 0; } /* Update asoc with the option described in param. * * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT * * asoc is the association to update. * param is the variable length parameter to use for update. * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. * If the current packet is an INIT we want to minimize the amount of * work we do. In particular, we should not build transport * structures for the addresses. 
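 *
 * Editorial caller-side sketch (added note; this mirrors what
 * sctp_process_init() above already does with the sctp_walk_params()
 * iterator):
 *
 *	sctp_walk_params(param, peer_init, init_hdr.params) {
 *		if (!sctp_process_param(asoc, param, peer_addr, gfp))
 *			goto clean_up;
 *	}
 *
 * A return value of 0 from this helper aborts processing of the whole
 * INIT and tears the half-built peer state down again.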
*/ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_addr addr; int i; __u16 sat; int retval = 1; sctp_scope_t scope; time_t stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; struct sctp_endpoint *ep = asoc->ep; /* We maintain all INIT parameters in network byte order all the * time. This allows us to not worry about whether the parameters * came from a fresh INIT, and INIT ACK, or were stored in a cookie. */ switch (param.p->type) { case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 != asoc->base.sk->sk_family) break; goto do_addr_param; case SCTP_PARAM_IPV4_ADDRESS: /* v4 addresses are not allowed on v6-only socket */ if (ipv6_only_sock(asoc->base.sk)) break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; case SCTP_PARAM_COOKIE_PRESERVATIVE: if (!net->sctp.cookie_preserve_enable) break; stale = ntohl(param.life->lifespan_increment); /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: /* Turn off the default values first so we'll know which * ones are really set by the peer. */ asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; /* Assume that peer supports the address family * by which it sends a packet. */ if (peer_addr->sa.sa_family == AF_INET6) asoc->peer.ipv6_address = 1; else if (peer_addr->sa.sa_family == AF_INET) asoc->peer.ipv4_address = 1; /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) sat /= sizeof(__u16); for (i = 0; i < sat; ++i) { switch (param.sat->types[i]) { case SCTP_PARAM_IPV4_ADDRESS: asoc->peer.ipv4_address = 1; break; case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 == asoc->base.sk->sk_family) asoc->peer.ipv6_address = 1; break; case SCTP_PARAM_HOST_NAME_ADDRESS: asoc->peer.hostname_address = 1; break; default: /* Just ignore anything else. */ break; } } break; case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); asoc->peer.cookie = param.cookie->body; break; case SCTP_PARAM_HEARTBEAT_INFO: /* Would be odd to receive, but it causes no problems. */ break; case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: /* Rejected during verify stage. */ break; case SCTP_PARAM_ECN_CAPABLE: asoc->peer.ecn_capable = 1; break; case SCTP_PARAM_ADAPTATION_LAYER_IND: asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); break; case SCTP_PARAM_SET_PRIMARY: if (!net->sctp.addip_enable) goto fall_through; addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* if the address is invalid, we can't process it. * XXX: see spec for what to do. 
*/ if (!af->addr_valid(&addr, NULL, NULL)) break; t = sctp_assoc_lookup_paddr(asoc, &addr); if (!t) break; sctp_assoc_set_primary(asoc, t); break; case SCTP_PARAM_SUPPORTED_EXT: sctp_process_ext_param(asoc, param); break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ goto fall_through; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fall_through; /* Save peer's random parameter */ asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { retval = 0; break; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fall_through; /* Save peer's HMAC list */ asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { retval = 0; break; } /* Set the default HMAC the peer requested*/ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fall_through; asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) retval = 0; break; fall_through: default: /* Any unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be * called prior to this routine. Simply log the error * here. */ pr_debug("%s: ignoring param:%d for association:%p.\n", __func__, ntohs(param.p->type), asoc); break; } return retval; } /* Select a new verification tag. */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep) { /* I believe that this random number generator complies with RFC1750. * A tag of 0 is reserved for special cases (e.g. INIT). */ __u32 x; do { get_random_bytes(&x, sizeof(__u32)); } while (x == 0); return x; } /* Select an initial TSN to send during startup. */ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) { __u32 retval; get_random_bytes(&retval, sizeof(__u32)); return retval; } /* * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Address Parameter and other parameter will not be wrapped in this function */ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; length += addrlen; /* Create the chunk. 
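 *
 * Editorial layout sketch (added note; the 4 and 8 byte figures assume
 * a bare sctp_addiphdr_t serial and an IPv4 address parameter):
 *
 *	chunk header | serial number | address parameter | vparam_len
 *	               4 bytes         8 bytes             caller's TLVs
 *
 * so the "length" handed to sctp_make_control() below is
 * 4 + 8 + vparam_len in that case.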
*/ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; asconf.serial = htonl(asoc->addip_serial++); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); retval->param_hdr.v = sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP * 3.2.1 Add IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC001 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 3.2.2 Delete IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC002 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, union sctp_addr *laddr, struct sockaddr *addrs, int addrcnt, __be16 flags) { sctp_addip_param_t param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; void *addr_buf; struct sctp_af *af; int paramlen = sizeof(param); int addr_param_len = 0; int totallen = 0; int i; int del_pickup = 0; /* Get total length of all the address parameters. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); totallen += paramlen; totallen += addr_param_len; addr_buf += af->sockaddr_len; if (asoc->asconf_addr_del_pending && !del_pickup) { /* reuse the parameter length from the same scope one */ totallen += paramlen; totallen += addr_param_len; del_pickup = 1; pr_debug("%s: picked same-scope del_pending addr, " "totallen for all addresses is %d\n", __func__, totallen); } } /* Create an asconf chunk with the required length. */ retval = sctp_make_asconf(asoc, laddr, totallen); if (!retval) return NULL; /* Add the address parameters to the asconf chunk. 
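 *
 * Editorial note (added, illustrative): each address contributes one
 * sctp_addip_param_t header plus the embedded address parameter, i.e.
 * paramlen + addr_param_len bytes.  Assuming an 8 byte header and an
 * 8 byte IPv4 address parameter, adding two IPv4 addresses makes
 * totallen = 2 * (8 + 8) = 32, plus another 16 when a same-scope
 * pending delete was picked up while sizing the chunk above.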
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = flags; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); addr_buf += af->sockaddr_len; } if (flags == SCTP_PARAM_ADD_IP && del_pickup) { addr = asoc->asconf_addr_del_pending; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = SCTP_PARAM_DEL_IP; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); } return retval; } /* ADDIP * 3.2.4 Set Primary IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type =0xC004 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF chunk with Set Primary IP address parameter. */ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { sctp_addip_param_t param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; len += addrlen; /* Create the chunk and make asconf header. */ retval = sctp_make_asconf(asoc, addr, len); if (!retval) return NULL; param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; param.param_hdr.length = htons(len); param.crr_id = 0; sctp_addto_chunk(retval, sizeof(param), &param); sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0x80 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF_ACK chunk with enough space for the parameter responses. */ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); if (!retval) return NULL; asconf.serial = htonl(serial); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); return retval; } /* Add response parameters to an ASCONF_ACK chunk. 
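 *
 * Editorial sketch of the two shapes this helper emits (added note):
 *
 *	success:  [ SCTP_PARAM_SUCCESS_REPORT | length | crr_id ]
 *	failure:  [ SCTP_PARAM_ERR_CAUSE      | length | crr_id ]
 *	          [ error cause header carrying err_code        ]
 *	          [ copy of the ASCONF TLV that failed          ]
 *
 * with the outer parameter length covering everything nested inside.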
*/ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, __be16 err_code, sctp_addip_param_t *asconf_param) { sctp_addip_param_t ack_param; sctp_errhdr_t err_param; int asconf_param_len = 0; int err_param_len = 0; __be16 response_type; if (SCTP_ERROR_NO_ERROR == err_code) { response_type = SCTP_PARAM_SUCCESS_REPORT; } else { response_type = SCTP_PARAM_ERR_CAUSE; err_param_len = sizeof(err_param); if (asconf_param) asconf_param_len = ntohs(asconf_param->param_hdr.length); } /* Add Success Indication or Error Cause Indication parameter. */ ack_param.param_hdr.type = response_type; ack_param.param_hdr.length = htons(sizeof(ack_param) + err_param_len + asconf_param_len); ack_param.crr_id = crr_id; sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); if (SCTP_ERROR_NO_ERROR == err_code) return; /* Add Error Cause parameter. */ err_param.cause = err_code; err_param.length = htons(err_param_len + asconf_param_len); sctp_addto_chunk(chunk, err_param_len, &err_param); /* Add the failed TLV copied from ASCONF chunk. */ if (asconf_param) sctp_addto_chunk(chunk, asconf_param_len, asconf_param); } /* Process a asconf parameter. */ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, sctp_addip_param_t *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) return SCTP_ERROR_UNKNOWN_PARAM; switch (addr_param->p.type) { case SCTP_PARAM_IPV6_ADDRESS: if (!asoc->peer.ipv6_address) return SCTP_ERROR_DNS_FAILED; break; case SCTP_PARAM_IPV4_ADDRESS: if (!asoc->peer.ipv4_address) return SCTP_ERROR_DNS_FAILED; break; default: return SCTP_ERROR_DNS_FAILED; } af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. * (note: wildcard is permitted and requires special handling so * make sure we check for that) */ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) return SCTP_ERROR_DNS_FAILED; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* Section 4.2.1: * If the address 0.0.0.0 or ::0 is provided, the source * address of the packet MUST be added. */ if (af->is_any(&addr)) memcpy(&addr, &asconf->source, sizeof(addr)); /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address * request and does not have the local resources to add this * new address to the association, it MUST return an Error * Cause TLV set to the new error code 'Operation Refused * Due to Resource Shortage'. */ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; /* Start the heartbeat timer. */ if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) sctp_transport_hold(peer); asoc->new_transport = peer; break; case SCTP_PARAM_DEL_IP: /* ADDIP 4.3 D7) If a request is received to delete the * last remaining IP address of a peer endpoint, the receiver * MUST send an Error Cause TLV with the error cause set to the * new error code 'Request to Delete Last Remaining IP Address'. 
*/ if (asoc->peer.transport_count == 1) return SCTP_ERROR_DEL_LAST_IP; /* ADDIP 4.3 D8) If a request is received to delete an IP * address which is also the source address of the IP packet * which contained the ASCONF chunk, the receiver MUST reject * this request. To reject the request the receiver MUST send * an Error Cause TLV set to the new error code 'Request to * Delete Source IP Address' */ if (sctp_cmp_addr_exact(&asconf->source, &addr)) return SCTP_ERROR_DEL_SRC_IP; /* Section 4.2.2 * If the address 0.0.0.0 or ::0 is provided, all * addresses of the peer except the source address of the * packet MUST be deleted. */ if (af->is_any(&addr)) { sctp_assoc_set_primary(asoc, asconf->transport); sctp_assoc_del_nonprimary_peers(asoc, asconf->transport); } else sctp_assoc_del_peer(asoc, &addr); break; case SCTP_PARAM_SET_PRIMARY: /* ADDIP Section 4.2.4 * If the address 0.0.0.0 or ::0 is provided, the receiver * MAY mark the source address of the packet as its * primary. */ if (af->is_any(&addr)) memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) return SCTP_ERROR_DNS_FAILED; sctp_assoc_set_primary(asoc, peer); break; } return SCTP_ERROR_NO_ERROR; } /* Verify the ASCONF packet before we process it. */ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; union sctp_params param; bool addr_param_seen = false; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(sctp_ipv4addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(sctp_ipv6addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return false; break; default: /* This is unkown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; } /* Process an incoming ASCONF chunk with the next expected serial no. and * return an ASCONF_ACK chunk to be sent in response. */ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; bool all_param_pass = true; union sctp_params param; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; struct sctp_chunk *asconf_ack; __be16 err_code; int length = 0; int chunk_len; __u32 serial; chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); hdr = (sctp_addiphdr_t *)asconf->skb->data; serial = ntohl(hdr->serial); /* Skip the addiphdr and store a pointer to address parameter. 
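 *
 * Editorial sketch of the walk that follows (added note; offsets are
 * relative to asconf->skb->data, where the chunk header has already
 * been pulled):
 *
 *	| addiphdr (serial) | address parameter | ASCONF param TLVs ... |
 *	^ data               ^ data + 4          ^ data + 4 + addr len
 *
 * chunk_len is reduced by every piece that is skipped, so it always
 * reflects the bytes still left to parse.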
*/ length = sizeof(sctp_addiphdr_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); chunk_len -= length; /* Skip the address parameter and store a pointer to the first * asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; chunk_len -= length; /* create an ASCONF_ACK chunk. * Based on the definitions of parameters, we know that the size of * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF * parameters. */ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4); if (!asconf_ack) goto done; /* Process the TLVs contained within the ASCONF chunk. */ sctp_walk_params(param, addip, addip_hdr.params) { /* Skip preceeding address parameters. */ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS) continue; err_code = sctp_process_asconf_param(asoc, asconf, param.addip); /* ADDIP 4.1 A7) * If an error response is received for a TLV parameter, * all TLVs with no response before the failed TLV are * considered successful if not reported. All TLVs after * the failed response are considered unsuccessful unless * a specific success indication is present for the parameter. */ if (err_code != SCTP_ERROR_NO_ERROR) all_param_pass = false; if (!all_param_pass) sctp_add_asconf_response(asconf_ack, param.addip->crr_id, err_code, param.addip); /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add * an IP address sends an 'Out of Resource' in its response, it * MUST also fail any subsequent add or delete requests bundled * in the ASCONF. */ if (err_code == SCTP_ERROR_RSRC_LOW) goto done; } done: asoc->peer.addip_serial++; /* If we are sending a new ASCONF_ACK hold a reference to it in assoc * after freeing the reference to old asconf ack if any. */ if (asconf_ack) { sctp_chunk_hold(asconf_ack); list_add_tail(&asconf_ack->transmitted_list, &asoc->asconf_ack_list); } return asconf_ack; } /* Process a asconf parameter that is successfully acked. */ static void sctp_asconf_param_success(struct sctp_association *asoc, sctp_addip_param_t *asconf_param) { struct sctp_af *af; union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* This is always done in BH context with a socket lock * held, so the list can not change. 
*/ local_bh_disable(); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, &addr)) saddr->state = SCTP_ADDR_SRC; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; case SCTP_PARAM_DEL_IP: local_bh_disable(); sctp_del_bind_addr(bp, &addr); if (asoc->asconf_addr_del_pending != NULL && sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { kfree(asoc->asconf_addr_del_pending); asoc->asconf_addr_del_pending = NULL; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; default: break; } } /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk * for the given asconf parameter. If there is no response for this parameter, * return the error code based on the third argument 'no_err'. * ADDIP 4.1 * A7) If an error response is received for a TLV parameter, all TLVs with no * response before the failed TLV are considered successful if not reported. * All TLVs after the failed response are considered unsuccessful unless a * specific success indication is present for the parameter. */ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, sctp_addip_param_t *asconf_param, int no_err) { sctp_addip_param_t *asconf_ack_param; sctp_errhdr_t *err_param; int length; int asconf_ack_len; __be16 err_code; if (no_err) err_code = SCTP_ERROR_NO_ERROR; else err_code = SCTP_ERROR_REQ_REFUSED; asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); /* Skip the addiphdr from the asconf_ack chunk and store a pointer to * the first asconf_ack parameter. */ length = sizeof(sctp_addiphdr_t); asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + length); asconf_ack_len -= length; while (asconf_ack_len > 0) { if (asconf_ack_param->crr_id == asconf_param->crr_id) { switch (asconf_ack_param->param_hdr.type) { case SCTP_PARAM_SUCCESS_REPORT: return SCTP_ERROR_NO_ERROR; case SCTP_PARAM_ERR_CAUSE: length = sizeof(sctp_addip_param_t); err_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; if (asconf_ack_len > 0) return err_param->cause; else return SCTP_ERROR_INV_PARAM; break; default: return SCTP_ERROR_INV_PARAM; } } length = ntohs(asconf_ack_param->param_hdr.length); asconf_ack_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; } return err_code; } /* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; int no_err = 1; int retval = 0; __be16 err_code = SCTP_ERROR_NO_ERROR; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. 
*/ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. */ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_UNKNOWN_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; asconf_len -= length; } if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; sctp_transport_immediate_rtx(asoc->peer.primary_path); } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; return retval; } /* Make a FWD TSN chunk. */ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist) { struct sctp_chunk *retval = NULL; struct sctp_fwdtsn_hdr ftsn_hdr; struct sctp_fwdtsn_skip skip; size_t hint; int i; hint = (nstreams + 1) * sizeof(__u32); retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); for (i = 0; i < nstreams; i++) { skip.stream = skiplist[i].stream; skip.ssn = skiplist[i].ssn; sctp_addto_chunk(retval, sizeof(skip), &skip); } return retval; }
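
/*
 * Editorial example (illustrative only; kept out of the build with
 * "#if 0"): one way a caller could assemble the skip list consumed by
 * sctp_make_fwdtsn() above.  The TSN/stream/SSN values are made up and
 * byte-order handling of the skip fields is glossed over here; only the
 * function and structure names used in this file are relied on.
 */
#if 0
static struct sctp_chunk *example_make_fwdtsn(struct sctp_association *asoc)
{
	/* Skip records: "everything on stream 1 up to SSN 10 and on
	 * stream 3 up to SSN 7 has been abandoned".
	 */
	struct sctp_fwdtsn_skip skiplist[2] = {
		{ .stream = 1, .ssn = 10 },
		{ .stream = 3, .ssn = 7 },
	};
	__u32 new_cum_tsn = 0x12345678;	/* made-up value; a real caller
					 * derives this from its cumulative
					 * TSN ack point */

	return sctp_make_fwdtsn(asoc, new_cum_tsn, 2, skiplist);
}
#endif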
/* CrossVul dataset marker -- end of entry:
 *   ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2289_0
 * next entry: crossvul-cpp_data_bad_5825_0
 */
/* * Copyright 2007 Bobby Bingham * Copyright 2012 Robert Nagy <ronag89 gmail com> * Copyright 2012 Anton Khirnov <anton khirnov net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * a filter enforcing given constant framerate */ #include <float.h> #include "libavutil/common.h" #include "libavutil/fifo.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "avfilter.h" #include "internal.h" #include "video.h" typedef struct FPSContext { const AVClass *class; AVFifoBuffer *fifo; ///< store frames until we get two successive timestamps /* timestamps in input timebase */ int64_t first_pts; ///< pts of the first frame that arrived on this filter int64_t pts; ///< pts of the first frame currently in the fifo double start_time; ///< pts, in seconds, of the expected first frame AVRational framerate; ///< target framerate int rounding; ///< AVRounding method for timestamps /* statistics */ int frames_in; ///< number of frames on input int frames_out; ///< number of frames on output int dup; ///< number of frames duplicated int drop; ///< number of framed dropped } FPSContext; #define OFFSET(x) offsetof(FPSContext, x) #define V AV_OPT_FLAG_VIDEO_PARAM #define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption fps_options[] = { { "fps", "A string describing desired output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, .flags = V|F }, { "start_time", "Assume the first PTS should be this value.", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX}, -DBL_MAX, DBL_MAX, V }, { "round", "set rounding method for timestamps", OFFSET(rounding), AV_OPT_TYPE_INT, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, { "zero", "round towards 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_ZERO }, 0, 5, V|F, "round" }, { "inf", "round away from 0", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_INF }, 0, 5, V|F, "round" }, { "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" }, { "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" }, { "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" }, { NULL }, }; AVFILTER_DEFINE_CLASS(fps); static av_cold int init(AVFilterContext *ctx) { FPSContext *s = ctx->priv; if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*)))) return AVERROR(ENOMEM); s->pts = AV_NOPTS_VALUE; s->first_pts = AV_NOPTS_VALUE; av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den); return 0; } static void flush_fifo(AVFifoBuffer *fifo) { while (av_fifo_size(fifo)) { AVFrame *tmp; av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL); av_frame_free(&tmp); } } static av_cold void uninit(AVFilterContext *ctx) { 
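    /* Editorial note (added comment): frames still queued in the fifo at
     * teardown never reached the output, so they are counted as dropped.
     * The fifo holds AVFrame pointers, hence the division by
     * sizeof(AVFrame*): e.g. 24 queued bytes on a 64-bit build means 3
     * undelivered frames. */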
FPSContext *s = ctx->priv; if (s->fifo) { s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*); flush_fifo(s->fifo); av_fifo_free(s->fifo); } av_log(ctx, AV_LOG_VERBOSE, "%d frames in, %d frames out; %d frames dropped, " "%d frames duplicated.\n", s->frames_in, s->frames_out, s->drop, s->dup); } static int config_props(AVFilterLink* link) { FPSContext *s = link->src->priv; link->time_base = av_inv_q(s->framerate); link->frame_rate= s->framerate; link->w = link->src->inputs[0]->w; link->h = link->src->inputs[0]->h; return 0; } static int request_frame(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; FPSContext *s = ctx->priv; int frames_out = s->frames_out; int ret = 0; while (ret >= 0 && s->frames_out == frames_out) ret = ff_request_frame(ctx->inputs[0]); /* flush the fifo */ if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { int i; for (i = 0; av_fifo_size(s->fifo); i++) { AVFrame *buf; av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf)) < 0) return ret; s->frames_out++; } return 0; } return ret; } static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf) { int ret; if (!av_fifo_space(fifo) && (ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) { av_frame_free(&buf); return ret; } av_fifo_generic_write(fifo, &buf, sizeof(buf), NULL); return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *buf) { AVFilterContext *ctx = inlink->dst; FPSContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int64_t delta; int i, ret; s->frames_in++; /* discard frames until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (buf->pts != AV_NOPTS_VALUE) { ret = write_to_fifo(s->fifo, buf); if (ret < 0) return ret; if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) { double first_pts = s->start_time * AV_TIME_BASE; first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX); s->first_pts = s->pts = av_rescale_q(first_pts, AV_TIME_BASE_Q, inlink->time_base); av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n", s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q, outlink->time_base)); } else { s->first_pts = s->pts = buf->pts; } } else { av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " "timestamp.\n"); av_frame_free(&buf); s->drop++; } return 0; } /* now wait for the next timestamp */ if (buf->pts == AV_NOPTS_VALUE) { return write_to_fifo(s->fifo, buf); } /* number of output frames */ delta = av_rescale_q_rnd(buf->pts - s->pts, inlink->time_base, outlink->time_base, s->rounding); if (delta < 1) { /* drop the frame and everything buffered except the first */ AVFrame *tmp; int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); s->drop += drop; av_fifo_generic_read(s->fifo, &tmp, sizeof(tmp), NULL); flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, tmp); av_frame_free(&buf); return ret; } /* can output >= 1 frames */ for (i = 0; i < delta; i++) { AVFrame *buf_out; av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); /* duplicate the frame if needed */ if (!av_fifo_size(s->fifo) && i < delta - 1) { AVFrame *dup = av_frame_clone(buf_out); av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); if (dup) ret = write_to_fifo(s->fifo, dup); else ret = AVERROR(ENOMEM); if (ret < 0) { av_frame_free(&buf_out); av_frame_free(&buf); return ret; } s->dup++; } buf_out->pts = av_rescale_q(s->first_pts, 
inlink->time_base, outlink->time_base) + s->frames_out; if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { av_frame_free(&buf); return ret; } s->frames_out++; } flush_fifo(s->fifo); ret = write_to_fifo(s->fifo, buf); s->pts = s->first_pts + av_rescale_q(s->frames_out, outlink->time_base, inlink->time_base); return ret; } static const AVFilterPad avfilter_vf_fps_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad avfilter_vf_fps_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .request_frame = request_frame, .config_props = config_props }, { NULL } }; AVFilter avfilter_vf_fps = { .name = "fps", .description = NULL_IF_CONFIG_SMALL("Force constant framerate."), .init = init, .uninit = uninit, .priv_size = sizeof(FPSContext), .priv_class = &fps_class, .inputs = avfilter_vf_fps_inputs, .outputs = avfilter_vf_fps_outputs, };
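
/*
 * Editorial example (illustrative only, excluded from the build): the
 * core of filter_frame() above is the rescale from the input to the
 * output time base.  The sketch below shows the same arithmetic for a
 * 30 fps input retimed to 25 fps, using only public libavutil helpers.
 */
#if 0
#include "libavutil/mathematics.h"
#include "libavutil/rational.h"

static void example_fps_delta(void)
{
    AVRational in_tb  = { 1, 30 };            /* input frames are 1/30 s apart */
    AVRational out_tb = { 1, 25 };            /* output ticks are 1/25 s apart */
    int64_t first_pts = 0;
    int64_t buf_pts   = 6;                    /* 6 input frames = 0.2 s later  */

    /* Same computation as filter_frame(): how many output frames fit
     * between the anchor pts and this buffer's pts.  0.2 s at 25 fps is
     * 5 output frames. */
    int64_t delta = av_rescale_q_rnd(buf_pts - first_pts, in_tb, out_tb,
                                     AV_ROUND_NEAR_INF);
    (void)delta;                              /* delta == 5 here */
}
#endif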
/* CrossVul dataset marker -- end of entry:
 *   ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5825_0
 * next entry: crossvul-cpp_data_good_946_0
 */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO GGGGG RRRR IIIII FFFFF Y Y % % MM MM O O G R R I F Y Y % % M M M O O G GGG RRRR I FFF Y % % M M O O G G R R I F Y % % M M OOO GGGG R R IIIII F Y % % % % % % MagickWand Module Methods % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use the mogrify program to resize an image, blur, crop, despeckle, dither, % draw on, flip, join, re-sample, and much more. This tool is similiar to % convert except that the original image file is overwritten (unless you % change the file suffix with the -format option) with any changes you % request. % */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/mogrify-private.h" #include "MagickCore/blob-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/image-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_HAVE_UTIME_H) #include <utime.h> #endif /* Constant declaration. */ static const char MogrifyAlphaColor[] = "#bdbdbd", /* gray */ MogrifyBackgroundColor[] = "#ffffff", /* white */ MogrifyBorderColor[] = "#dfdfdf"; /* gray */ /* Define declarations. */ #define UndefinedCompressionQuality 0UL /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o m m a n d G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickCommandGenesis() applies image processing options to an image as % prescribed by command line options. % % It wiil look for special options like "-debug", "-bench", and % "-distribute-cache" that needs to be applied even before the main % processing begins, and may completely overrule normal command processing. % Such 'Genesis' Options can only be given on the CLI, (not in a script) % and are typically ignored (as they have been handled) if seen later. % % The format of the MagickCommandGenesis method is: % % MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, % MagickCommand command,int argc,char **argv,char **metadata, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o command: Choose from ConvertImageCommand, IdentifyImageCommand, % MogrifyImageCommand, CompositeImageCommand, CompareImagesCommand, % ConjureImageCommand, StreamImageCommand, ImportImageCommand, % DisplayImageCommand, or AnimateImageCommand. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. 
% % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, MagickCommand command,int argc,char **argv,char **metadata, ExceptionInfo *exception) { char client_name[MaxTextExtent], *option; double duration, serial; MagickBooleanType concurrent, regard_warnings, status; register ssize_t i; size_t iterations, number_threads; ssize_t n; (void) setlocale(LC_ALL,""); (void) setlocale(LC_NUMERIC,"C"); GetPathComponent(argv[0],TailPath,client_name); (void) SetClientName(client_name); concurrent=MagickFalse; duration=(-1.0); iterations=1; status=MagickTrue; regard_warnings=MagickFalse; for (i=1; i < (ssize_t) (argc-1); i++) { option=argv[i]; if ((strlen(option) == 1) || ((*option != '-') && (*option != '+'))) continue; if (LocaleCompare("-bench",option) == 0) iterations=StringToUnsignedLong(argv[++i]); if (LocaleCompare("-concurrent",option) == 0) concurrent=MagickTrue; if (LocaleCompare("-debug",option) == 0) (void) SetLogEventMask(argv[++i]); if (LocaleCompare("-distribute-cache",option) == 0) { DistributePixelCacheServer(StringToInteger(argv[++i]),exception); exit(0); } if (LocaleCompare("-duration",option) == 0) duration=StringToDouble(argv[++i],(char **) NULL); if (LocaleCompare("-regard-warnings",option) == 0) regard_warnings=MagickTrue; } if (iterations == 1) { char *text; text=(char *) NULL; status=command(image_info,argc,argv,&text,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); text=DestroyString(text); } return(status); } number_threads=GetOpenMPMaximumThreads(); serial=0.0; for (n=1; n <= (ssize_t) number_threads; n++) { double e, parallel, user_time; TimerInfo *timer; (void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n); timer=AcquireTimerInfo(); if (concurrent == MagickFalse) { for (i=0; i < (ssize_t) iterations; i++) { char *text; text=(char *) NULL; if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,&text,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); text=DestroyString(text); } } } else { SetOpenMPNested(1); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp parallel for shared(status) #endif for (i=0; i < (ssize_t) iterations; i++) { char *text; text=(char *) NULL; if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,&text,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_MagickCommandGenesis) #endif { if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if (text != (char *) NULL) { if (metadata != (char **) NULL) (void) ConcatenateString(&(*metadata),text); 
text=DestroyString(text); } } } } user_time=GetUserTime(timer); parallel=GetElapsedTime(timer); e=1.0; if (n == 1) serial=parallel; else e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/ (double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n); (void) FormatLocaleFile(stderr, " Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n", (double) n,(double) iterations,(double) iterations/parallel,e,user_time, (unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel, 60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5)); timer=DestroyTimerInfo(timer); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImage() applies simple single image processing options to a single % image that may be part of a large list, but also handles any 'region' % image handling. % % The image in the list may be modified in three different ways... % % * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), % * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) % * replace by a list of images (only the -separate option!) % % In each case the result is returned into the list, and a pointer to the % modified image (last image added if replaced by a list of images) is % returned. % % ASIDE: The -crop is present but restricted to non-tile single image crops % % This means if all the images are being processed (such as by % MogrifyImages(), next image to be processed will be as per the pointer % (*image)->next. Also the image list may grow as a result of some specific % operations but as images are never merged or deleted, it will never shrink % in length. Typically the list will remain the same length. % % WARNING: As the image pointed to may be replaced, the first image in the % list may also change. GetFirstImageInList() should be used by caller if % they wish return the Image pointer to the first image in list. % % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, % const char **argv,Image **image) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MagickPathExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; /* Read an image into a image cache (for repeated usage) if not already in cache. Then return the image that is in the cache. 
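
    Editorial sketch (added note, not part of the original comment): the
    registry below is keyed with "cache:<path>", so repeated options that
    reference the same file pay the ReadImage() cost only once:

      key = "cache:rose.png"        (the file name is only an example)
      GetImageRegistry(key)  -> hit:  return the cached Image
                             -> miss: ReadImage(path) then SetImageRegistry(key)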
*/ (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); (void) CopyMagickString(read_info->filename,path,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } static inline MagickBooleanType IsPathWritable(const char *path) { if (IsPathAccessible(path) == MagickFalse) return(MagickFalse); if (access_utf8(path,W_OK) != 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MagickPathExtent], tag[MagickPathExtent]; const char *locale_message; register char *p; magick_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MagickPathExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } static Image *SparseColorOption(const Image *image, const SparseColorMethod method,const char *arguments, const MagickBooleanType color_from_image,ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p; double *sparse_arguments; Image *sparse_image; PixelInfo color; MagickBooleanType error; register size_t x; size_t number_arguments, number_colors; /* SparseColorOption() parses the complex -sparse-color argument into an an array of floating point values then calls SparseColorImage(). Argument is a complex mix of floating-point pixel coodinates, and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to image - and add up number of color channel. 
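
    Editorial example (added note): for a plain RGB image with no alpha
    channel and no black channel, number_colors works out to 3, so each
    control point in the -sparse-color argument needs 2 coordinates plus
    3 color values.  Two control points therefore parse into
    2 x (2 + 3) = 10 floating point arguments; with color_from_image set,
    only the 2 x 2 coordinates are read from the string and the colors
    are taken from the image itself.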
*/ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) { if ( color_from_image ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color arg given, when colors are coming from image"); return( (Image *) NULL); } x += number_colors; /* color argument */ } else { x++; /* floating point argument */ } } error=MagickTrue; if ( color_from_image ) { /* just the control points are being given */ error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse; number_arguments=(x/2)*(2+number_colors); } else { /* control points and color values */ error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse; number_arguments=x; } if ( error ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, " MemoryAllocationFailed\n""%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of X-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of Y-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color values for this control point */ #if 0 if ( (color_from_image ) { /* get color from image */ /* HOW??? 
*/ } else #endif { /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryColorCompliance(token,AllCompliance,&color,exception); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.blue; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) sparse_arguments[x++] = QuantumScale*color.black; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) sparse_arguments[x++] = QuantumScale*color.alpha; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } } if ( number_arguments != x && !error ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, " InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error"); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( (Image *) NULL); } if ( error ) return( (Image *) NULL); /* Call the Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments, exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, const char **argv,Image **image,ExceptionInfo *exception) { CompositeOperator compose; const char *format, *option; 
double attenuate; DrawInfo *draw_info; GeometryInfo geometry_info; ImageInfo *mogrify_info; MagickStatusType status; PixelInfo fill; MagickStatusType flags; PixelInterpolateMethod interpolate_method; QuantizeInfo *quantize_info; RectangleInfo geometry, region_geometry; register ssize_t i; /* Initialize method variables. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (argc < 0) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL); quantize_info=AcquireQuantizeInfo(mogrify_info); SetGeometryInfo(&geometry_info); GetPixelInfo(*image,&fill); fill=(*image)->background_color; attenuate=1.0; compose=(*image)->compose; interpolate_method=UndefinedInterpolatePixel; format=GetImageOption(mogrify_info,"format"); SetGeometry(*image,&region_geometry); /* Transmogrify the image. */ for (i=0; i < (ssize_t) argc; i++) { Image *mogrify_image; ssize_t count; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option), 0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); mogrify_image=(Image *) NULL; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { /* Adaptive blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveBlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* Adaptive resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=AdaptiveResizeImage(*image,geometry.width, geometry.height,exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { /* Adaptive sharpen image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveSharpenImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("affine",option+1) == 0) { /* Affine matrix. */ if (*option == '+') { GetAffineMatrix(&draw_info->affine); break; } (void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception); break; } if (LocaleCompare("alpha",option+1) == 0) { AlphaChannelOption alpha_type; (void) SyncImageSettings(mogrify_info,*image,exception); alpha_type=(AlphaChannelOption) ParseCommandOption( MagickAlphaChannelOptions,MagickFalse,argv[i+1]); (void) SetImageAlphaChannel(*image,alpha_type,exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char *text, geometry_str[MagickPathExtent]; /* Annotate image. 
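            As parsed below, the first argument is a geometry whose rho/sigma
            give the x/y text rotation in degrees and whose xi/psi give the
            text offset, and the second argument is the text itself; e.g.
            "-annotate 45x45+10+20 'text'" draws 'text' rotated 45 degrees at
            offset +10+20 (illustrative values).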
*/ (void) SyncImageSettings(mogrify_info,*image,exception); SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; text=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (text == (char *) NULL) break; (void) CloneString(&draw_info->text,text); text=DestroyString(text); (void) FormatLocaleString(geometry_str,MagickPathExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&draw_info->geometry,geometry_str); draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.ry=(-sin(DegreesToRadians( fmod(geometry_info.sigma,360.0)))); draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(*image,draw_info,exception); break; } if (LocaleCompare("antialias",option+1) == 0) { draw_info->stroke_antialias=(*option == '-') ? MagickTrue : MagickFalse; draw_info->text_antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') { attenuate=1.0; break; } attenuate=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("auto-gamma",option+1) == 0) { /* Auto Adjust Gamma of image based on its mean */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) AutoGammaImage(*image,exception); break; } if (LocaleCompare("auto-level",option+1) == 0) { /* Perfectly Normalize (max/min stretch) the image */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) AutoLevelImage(*image,exception); break; } if (LocaleCompare("auto-orient",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=AutoOrientImage(*image,(*image)->orientation, exception); break; } if (LocaleCompare("auto-threshold",option+1) == 0) { AutoThresholdMethod method; (void) SyncImageSettings(mogrify_info,*image,exception); method=(AutoThresholdMethod) ParseCommandOption( MagickAutoThresholdOptions,MagickFalse,argv[i+1]); (void) AutoThresholdImage(*image,method,exception); break; } break; } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { /* Black threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) BlackThresholdImage(*image,argv[i+1],exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { /* Blue shift image. */ (void) SyncImageSettings(mogrify_info,*image,exception); geometry_info.rho=1.5; if (*option == '-') flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("blur",option+1) == 0) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; mogrify_image=BlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("border",option+1) == 0) { /* Surround image with a border of solid color. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=BorderImage(*image,&geometry,compose,exception); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &draw_info->border_color,exception); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->border_color,exception); break; } if (LocaleCompare("box",option+1) == 0) { (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->undercolor,exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; /* Brightness / contrast image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImage(*image,brightness,contrast, exception); break; } break; } case 'c': { if (LocaleCompare("canny",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10; if ((flags & PsiValue) == 0) geometry_info.psi=0.30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } mogrify_image=CannyEdgeImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Color correct with a color decision list. */ (void) SyncImageSettings(mogrify_info,*image,exception); color_correction_collection=FileToString(argv[i+1],~0UL,exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(*image,color_correction_collection, exception); break; } if (LocaleCompare("channel",option+1) == 0) { ChannelType channel; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetPixelChannelMask(*image,DefaultChannels); break; } channel=(ChannelType) ParseChannelOption(argv[i+1]); (void) SetPixelChannelMask(*image,channel); break; } if (LocaleCompare("charcoal",option+1) == 0) { /* Charcoal image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; mogrify_image=CharcoalImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("chop",option+1) == 0) { /* Chop the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ChopImage(*image,&geometry,exception); break; } if (LocaleCompare("clahe",option+1) == 0) { /* Contrast limited adaptive histogram equalization. 
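            The single geometry argument is parsed twice below: width x height
            give the tile size, the x offset is passed to CLAHEImage() as the
            number of bins, and psi as the clip limit; e.g.
            "-clahe 25x25%+128+3" (illustrative values only).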
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); flags=ParseGeometry(argv[i+1],&geometry_info); (void) CLAHEImage(*image,geometry.width,geometry.height, (size_t) geometry.x,geometry_info.psi,exception); break; } if (LocaleCompare("clip",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } (void) ClipImage(*image,exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { Image *clip_mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ clip_mask=GetImageCache(mogrify_info,argv[i+1],exception); if (clip_mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("colorize",option+1) == 0) { /* Colorize the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=ColorizeImage(*image,argv[i+1],&fill,exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image,exception); kernel=AcquireKernelInfo(argv[i+1],exception); if (kernel == (KernelInfo *) NULL) break; /* FUTURE: check on size of the matrix */ mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); quantize_info->number_colors=StringToUnsignedLong(argv[i+1]); if (quantize_info->number_colors == 0) break; if (((*image)->storage_class == DirectClass) || (*image)->colors > quantize_info->number_colors) (void) QuantizeImage(quantize_info,*image,exception); else (void) CompressImageColormap(*image,exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { ColorspaceType colorspace; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) TransformImageColorspace(*image,sRGBColorspace, exception); break; } colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) TransformImageColorspace(*image,colorspace,exception); break; } if (LocaleCompare("compose",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("connected-components",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=ConnectedComponentsImage(*image,(size_t) StringToInteger(argv[i+1]),(CCObjectInfo **) NULL,exception); break; } if (LocaleCompare("contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; /* Contrast stretch image. 
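            The black and white points are pixel counts; with a percent flag
            they are scaled by columns*rows below, so e.g.
            "-contrast-stretch 2x1%" saturates the darkest 2% and the
            brightest 1% of pixels (illustrative values).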
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } white_point=(double) (*image)->columns*(*image)->rows- white_point; (void) ContrastStretchImage(*image,black_point,white_point, exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; size_t extent; (void) SyncImageSettings(mogrify_info,*image,exception); kernel_info=AcquireKernelInfo(argv[i+1],exception); if (kernel_info == (KernelInfo *) NULL) break; extent=kernel_info->width*kernel_info->height; gamma=0.0; for (j=0; j < (ssize_t) extent; j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) extent; j++) kernel_info->values[j]*=gamma; mogrify_image=MorphologyImage(*image,CorrelateMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* Crop a image to a smaller size */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=CropImageToTiles(*image,argv[i+1],exception); break; } if (LocaleCompare("cycle",option+1) == 0) { /* Cycle an image colormap. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]), exception); break; } break; } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { StringInfo *passkey; /* Decipher pixels. */ (void) SyncImageSettings(mogrify_info,*image,exception); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyDecipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. */ (void) CloneString(&draw_info->density,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH,exception); break; } (void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]), exception); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; /* Straighten the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') threshold=40.0*QuantumRange/100.0; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=DeskewImage(*image,threshold,exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { /* Reduce the speckles within an image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=DespeckleImage(*image,exception); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&draw_info->server_name,argv[i+1]); break; } if (LocaleCompare("distort",option+1) == 0) { char *args, token[MagickPathExtent]; const char *p; DistortMethod method; double *arguments; register ssize_t x; size_t number_arguments; /* Distort image. */ (void) SyncImageSettings(mogrify_info,*image,exception); method=(DistortMethod) ParseCommandOption(MagickDistortOptions, MagickFalse,argv[i+1]); if (method == ResizeDistortion) { double resize_args[2]; /* Special Case - Argument is actually a resize geometry! 
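              For example, "-distort Resize 50%" supplies a single geometry
              rather than the usual list of numeric distortion arguments.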
Convert that to an appropriate distortion argument array. */ (void) ParseRegionGeometry(*image,argv[i+2],&geometry, exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; mogrify_image=DistortImage(*image,method,(size_t)2, resize_args,MagickTrue,exception); break; } args=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); mogrify_image=DistortImage(*image,method,number_arguments,arguments, (*option == '+') ? MagickTrue : MagickFalse,exception); arguments=(double *) RelinquishMagickMemory(arguments); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither_method=NoDitherMethod; break; } quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("draw",option+1) == 0) { /* Draw image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) CloneString(&draw_info->primitive,argv[i+1]); (void) DrawImage(*image,draw_info,exception); break; } break; } case 'e': { if (LocaleCompare("edge",option+1) == 0) { /* Enhance edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=EdgeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("emboss",option+1) == 0) { /* Emboss image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EmbossImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("encipher",option+1) == 0) { StringInfo *passkey; /* Encipher pixels. */ (void) SyncImageSettings(mogrify_info,*image,exception); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&draw_info->encoding,argv[i+1]); break; } if (LocaleCompare("enhance",option+1) == 0) { /* Enhance image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=EnhanceImage(*image,exception); break; } if (LocaleCompare("equalize",option+1) == 0) { /* Equalize image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); (void) EqualizeImage(*image,exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*image,exception); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+ 1.0); (void) EvaluateImage(*image,op,constant,exception); break; } if (LocaleCompare("extent",option+1) == 0) { /* Set the image extent. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (geometry.width == 0) geometry.width=(*image)->columns; if (geometry.height == 0) geometry.height=(*image)->rows; mogrify_image=ExtentImage(*image,&geometry,exception); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') { if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); break; } (void) CloneString(&draw_info->family,argv[i+1]); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:features"); break; } (void) SetImageArtifact(*image,"identify:features",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { ExceptionInfo *sans; PixelInfo color; GetPixelInfo(*image,&fill); if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance,&fill, exception); draw_info->fill=fill; if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); else draw_info->fill=fill=color; break; } if (LocaleCompare("flip",option+1) == 0) { /* Flip image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=FlipImage(*image,exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { PixelInfo target; /* Floodfill image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) QueryColorCompliance(argv[i+2],AllCompliance,&target, exception); (void) FloodfillPaintImage(*image,draw_info,&target,geometry.x, geometry.y,*option == '-' ? MagickFalse : MagickTrue,exception); break; } if (LocaleCompare("flop",option+1) == 0) { /* Flop image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=FlopImage(*image,exception); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); break; } (void) CloneString(&draw_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { format=argv[i+1]; break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; /* Surround image with an ornamental border. 
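            The geometry maps onto the FrameInfo fields set below: width and
            height give the frame thickness and the x/y offsets the outer and
            inner bevels, so e.g. "-frame 10x10+3+3" draws a 10 pixel frame
            with 3 pixel bevels (illustrative values).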
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=(*image)->columns+2*frame_info.width; frame_info.height=(*image)->rows+2*frame_info.height; mogrify_image=FrameImage(*image,&frame_info,compose,exception); break; } if (LocaleCompare("function",option+1) == 0) { char *arguments, token[MagickPathExtent]; const char *p; double *parameters; MagickFunction function; register ssize_t x; size_t number_parameters; /* Function Modify Image Values */ (void) SyncImageSettings(mogrify_info,*image,exception); function=(MagickFunction) ParseCommandOption(MagickFunctionOptions, MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (arguments == (char *) NULL) break; p=(char *) arguments; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_parameters=(size_t) x; parameters=(double *) AcquireQuantumMemory(number_parameters, sizeof(*parameters)); if (parameters == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(parameters,0,number_parameters* sizeof(*parameters)); p=(char *) arguments; for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); parameters[x]=StringToDouble(token,(char **) NULL); } arguments=DestroyString(arguments); (void) FunctionImage(*image,function,number_parameters,parameters, exception); parameters=(double *) RelinquishMagickMemory(parameters); break; } break; } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { /* Gamma image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') (*image)->gamma=StringToDouble(argv[i+1],(char **) NULL); else (void) GammaImage(*image,StringToDouble(argv[i+1],(char **) NULL), exception); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=GaussianBlurImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset, Resize last image. 
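            e.g. "-geometry +10+10" merely records a composition offset in the
            image, while a size-only form such as "-geometry 640x480" falls
            through to the ResizeImage() call below (illustrative values).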
*/ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { if ((*image)->geometry != (char *) NULL) (*image)->geometry=DestroyString((*image)->geometry); break; } flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&(*image)->geometry,argv[i+1]); else mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,exception); break; } if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { draw_info->gravity=UndefinedGravity; break; } draw_info->gravity=(GravityType) ParseCommandOption( MagickGravityOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("grayscale",option+1) == 0) { PixelIntensityMethod method; (void) SyncImageSettings(mogrify_info,*image,exception); method=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,argv[i+1]); (void) GrayscaleImage(*image,method,exception); break; } break; } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]); break; } if (LocaleCompare("hough-lines",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } break; } case 'i': { if (LocaleCompare("identify",option+1) == 0) { char *text; (void) SyncImageSettings(mogrify_info,*image,exception); if (format == (char *) NULL) { (void) IdentifyImage(*image,stdout,mogrify_info->verbose, exception); break; } text=InterpretImageProperties(mogrify_info,*image,format, exception); if (text == (char *) NULL) break; (void) fputs(text,stdout); text=DestroyString(text); break; } if (LocaleCompare("implode",option+1) == 0) { /* Implode image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=ImplodeImage(*image,geometry_info.rho, interpolate_method,exception); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interline_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolate",option+1) == 0) { interpolate_method=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interword_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* Interpolative resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=InterpolativeResizeImage(*image,geometry.width, geometry.height,interpolate_method,exception); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->kerning=geometry_info.rho; break; } if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; mogrify_image=KuwaharaImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } break; } case 'l': { if (LocaleCompare("lat",option+1) == 0) { /* Local adaptive threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=AdaptiveThresholdImage(*image,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,(double) geometry_info.xi,exception); break; } if (LocaleCompare("level",option+1) == 0) { double black_point, gamma, white_point; /* Parse levels. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) (QuantumRange/100.0); white_point*=(double) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if ((*option == '+') || ((flags & AspectValue) != 0)) (void) LevelizeImage(*image,black_point,white_point,gamma, exception); else (void) LevelImage(*image,black_point,white_point,gamma, exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MagickPathExtent]; const char *p; PixelInfo black_point, white_point; p=(const char *) argv[i+1]; GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &black_point,exception); else (void) QueryColorCompliance("#000000",AllCompliance, &black_point,exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &white_point,exception); else (void) QueryColorCompliance("#ffffff",AllCompliance, &white_point,exception); } (void) LevelImageColors(*image,&black_point,&white_point, *option == '+' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(double) (*image)->columns*(*image)->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) (*image)->columns*(*image)->rows- black_point; (void) LinearStretchImage(*image,black_point,white_point,exception); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* Liquid rescale image. 
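            Seam-carving resize: e.g. "-liquid-rescale 75x75%" rescales to 75%
            of the original size (illustrative value); when no offsets are
            given, the defaults set below (x=1, y=0) are passed through as the
            remaining LiquidRescaleImage() parameters.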
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; mogrify_image=LiquidRescaleImage(*image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; mogrify_image=LocalContrastImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("lowlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { /* Double image size. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=MagnifyImage(*image,exception); break; } if (LocaleCompare("map",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image,exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,mask,exception); mask=DestroyImage(mask); break; } if (LocaleCompare("matte",option+1) == 0) { (void) SetImageAlphaChannel(*image,(*option == '-') ? SetAlphaChannel : DeactivateAlphaChannel,exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("median",option+1) == 0) { /* Median filter image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,MedianStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* Mode image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,ModeStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ModulateImage(*image,argv[i+1],exception); break; } if (LocaleCompare("moments",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:moments"); break; } (void) SetImageArtifact(*image,"identify:moments",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { if (*option == '+') { (void) SetImageProgressMonitor(*image, (MagickProgressMonitor) NULL,(void *) NULL); break; } (void) SetImageProgressMonitor(*image,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) SetImageType(*image,BilevelType,exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; const char *p; KernelInfo *kernel; MorphologyMethod method; ssize_t iterations; /* Morphological Image Operation */ (void) SyncImageSettings(mogrify_info,*image,exception); p=argv[i+1]; GetNextToken(p,&p,MagickPathExtent,token); method=(MorphologyMethod) ParseCommandOption( MagickMorphologyOptions,MagickFalse,token); iterations=1L; GetNextToken(p,&p,MagickPathExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MagickPathExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(argv[i+2],exception); if (kernel == (KernelInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnabletoParseKernel","morphology"); status=MagickFalse; break; } mogrify_image=MorphologyImage(*image,method,iterations,kernel, exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { /* Motion blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=MotionBlurImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } break; } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) NegateImage(*image,*option == '+' ? 
MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("noise",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '-') { flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImage(*image,NonpeakStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); } else { NoiseType noise; noise=(NoiseType) ParseCommandOption(MagickNoiseOptions, MagickFalse,argv[i+1]); mogrify_image=AddNoiseImage(*image,noise,attenuate,exception); } break; } if (LocaleCompare("normalize",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) NormalizeImage(*image,exception); break; } break; } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { PixelInfo target; (void) SyncImageSettings(mogrify_info,*image,exception); (void) QueryColorCompliance(argv[i+1],AllCompliance,&target, exception); (void) OpaquePaintImage(*image,&target,&fill,*option == '-' ? MagickFalse : MagickTrue,exception); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) OrderedDitherImage(*image,argv[i+1],exception); break; } break; } case 'p': { if (LocaleCompare("paint",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=OilPaintImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { /* Perceptible image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) PerceptibleImage(*image,StringToDouble(argv[i+1], (char **) NULL),exception); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') (void) ParseGeometry("12",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("polaroid",option+1) == 0) { const char *caption; double angle; RandomInfo *random_info; /* Simulate a Polaroid picture. */ (void) SyncImageSettings(mogrify_info,*image,exception); random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); if (*option == '-') { SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); angle=geometry_info.rho; } caption=GetImageProperty(*image,"caption",exception); mogrify_image=PolaroidImage(*image,draw_info,caption,angle, interpolate_method,exception); break; } if (LocaleCompare("posterize",option+1) == 0) { /* Posterize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]), quantize_info->dither_method,exception); break; } if (LocaleCompare("preview",option+1) == 0) { PreviewType preview_type; /* Preview image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') preview_type=UndefinedPreview; else preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); mogrify_image=PreviewImage(*image,preview_type,exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a profile from the image. */ (void) ProfileImage(*image,argv[i+1],(const unsigned char *) NULL,0,exception); break; } /* Associate a profile with the image. 
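              e.g. "-profile my-profile.icc" (hypothetical filename) first
              tries to read the file as an image whose embedded profiles are
              copied across; if that fails, the raw file contents are applied
              directly via ProfileImage() below.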
*/ profile_info=CloneImageInfo(mogrify_info); profile=GetImageProfile(*image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,argv[i+1],exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *file_data; profile_info=CloneImageInfo(mogrify_info); (void) CopyMagickString(profile_info->filename,argv[i+1], MagickPathExtent); file_data=FileToStringInfo(profile_info->filename,~0UL, exception); if (file_data != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,exception); (void) ProfileImage(*image,profile_info->magick, GetStringInfoDatum(file_data), GetStringInfoLength(file_data),exception); file_data=DestroyStringInfo(file_data); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(*image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),exception); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } break; } case 'q': { if (LocaleCompare("quantize",option+1) == 0) { if (*option == '+') { quantize_info->colorspace=UndefinedColorspace; break; } quantize_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); break; } break; } case 'r': { if (LocaleCompare("rotational-blur",option+1) == 0) { /* Rotational blur image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=RotationalBlurImage(*image,geometry_info.rho, exception); break; } if (LocaleCompare("raise",option+1) == 0) { /* Surround image with a raise of solid color. */ flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue : MagickFalse,exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { /* Random threshold image. */ double min_threshold, max_threshold; (void) SyncImageSettings(mogrify_info,*image,exception); min_threshold=0.0; max_threshold=(double) QuantumRange; flags=ParseGeometry(argv[i+1],&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(argv[i+1],'%') != (char *) NULL) { max_threshold*=(double) (0.01*QuantumRange); min_threshold*=(double) (0.01*QuantumRange); } (void) RandomThresholdImage(*image,min_threshold,max_threshold, exception); break; } if (LocaleCompare("range-threshold",option+1) == 0) { /* Range threshold image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=geometry_info.sigma; if ((flags & PsiValue) == 0) geometry_info.psi=geometry_info.xi; if (strchr(argv[i+1],'%') != (char *) NULL) { geometry_info.rho*=(double) (0.01*QuantumRange); geometry_info.sigma*=(double) (0.01*QuantumRange); geometry_info.xi*=(double) (0.01*QuantumRange); geometry_info.psi*=(double) (0.01*QuantumRange); } (void) RangeThresholdImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("read-mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,ReadPixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,ReadPixelMask,mask,exception); mask=DestroyImage(mask); break; } if (LocaleCompare("region",option+1) == 0) { /* Apply read mask as defined by a region geometry. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { (void) SetImageRegionMask(*image,WritePixelMask, (const RectangleInfo *) NULL,exception); break; } (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); (void) SetImageRegionMask(*image,WritePixelMask,&geometry, exception); break; } if (LocaleCompare("render",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image,exception); draw_info->render=(*option == '+') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("remap",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image,exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') { (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); break; } (void) ResetImagePage(*image,argv[i+1]); break; } if (LocaleCompare("resample",option+1) == 0) { /* Resample image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ResampleImage(*image,geometry_info.rho, geometry_info.sigma,(*image)->filter,exception); break; } if (LocaleCompare("resize",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,exception); break; } if (LocaleCompare("roll",option+1) == 0) { /* Roll image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) (*image)->columns/100.0; geometry.y*=(double) (*image)->rows/100.0; } mogrify_image=RollImage(*image,geometry.x,geometry.y,exception); break; } if (LocaleCompare("rotate",option+1) == 0) { char *rotation; /* Check for conditional image rotation. 
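              e.g. "-rotate 90>" rotates only when the image is wider than it
              is tall, and "-rotate 90<" only when it is taller than it is
              wide; the '>' or '<' is stripped from the argument before the
              angle is parsed below.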
*/ (void) SyncImageSettings(mogrify_info,*image,exception); if (strchr(argv[i+1],'>') != (char *) NULL) if ((*image)->columns <= (*image)->rows) break; if (strchr(argv[i+1],'<') != (char *) NULL) if ((*image)->columns >= (*image)->rows) break; /* Rotate image. */ rotation=ConstantString(argv[i+1]); (void) SubstituteString(&rotation,">",""); (void) SubstituteString(&rotation,"<",""); (void) ParseGeometry(rotation,&geometry_info); rotation=DestroyString(rotation); mogrify_image=RotateImage(*image,geometry_info.rho,exception); break; } break; } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* Sample image with pixel replication. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SampleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ScaleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { /* Selectively blur pixels within a contrast threshold. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=SelectiveBlurImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* Break channels into separate images. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=SeparateImages(*image,exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { double threshold; /* Sepia-tone image. */ (void) SyncImageSettings(mogrify_info,*image,exception); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=SepiaToneImage(*image,threshold,exception); break; } if (LocaleCompare("segment",option+1) == 0) { /* Segment image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(*image,(*image)->colorspace, mogrify_info->verbose,geometry_info.rho,geometry_info.sigma, exception); break; } if (LocaleCompare("set",option+1) == 0) { char *value; /* Set image option. */ if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) DeleteImageOption(mogrify_info,argv[i+1]+7); (void) DeleteImageArtifact(*image,argv[i+1]+7); } else (void) DeleteImageProperty(*image,argv[i+1]); break; } value=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (value == (char *) NULL) break; if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value, exception); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) SetImageOption(image_info,argv[i+1]+7,value); (void) SetImageOption(mogrify_info,argv[i+1]+7,value); (void) SetImageArtifact(*image,argv[i+1]+7,value); } else (void) SetImageProperty(*image,argv[i+1],value,exception); value=DestroyString(value); break; } if (LocaleCompare("shade",option+1) == 0) { /* Shade image. 
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("shadow",option+1) == 0) { /* Shadow image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; mogrify_image=ShadowImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5), (ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { /* Sharpen image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; mogrify_image=SharpenImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("shave",option+1) == 0) { /* Shave the image edges. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ShaveImage(*image,&geometry,exception); break; } if (LocaleCompare("shear",option+1) == 0) { /* Shear image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ShearImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { /* Sigmoidal non-linearity contrast control. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma, exception); break; } if (LocaleCompare("sketch",option+1) == 0) { /* Sketch image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SketchImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("solarize",option+1) == 0) { double threshold; (void) SyncImageSettings(mogrify_info,*image,exception); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) SolarizeImage(*image,threshold,exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { SparseColorMethod method; char *arguments; /* Sparse Color Interpolated Gradient */ (void) SyncImageSettings(mogrify_info,*image,exception); method=(SparseColorMethod) ParseCommandOption( MagickSparseColorOptions,MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2], exception); if (arguments == (char *) NULL) break; mogrify_image=SparseColorOption(*image,method,arguments, option[0] == '+' ? MagickTrue : MagickFalse,exception); arguments=DestroyString(arguments); break; } if (LocaleCompare("splice",option+1) == 0) { /* Splice a solid color into the image. 
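The gravity-adjusted geometry selects the rows and columns to insert; they are filled with the image background color.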
*/ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SpliceImage(*image,&geometry,exception); break; } if (LocaleCompare("spread",option+1) == 0) { /* Spread an image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SpreadImage(*image,interpolate_method, geometry_info.rho,exception); break; } if (LocaleCompare("statistic",option+1) == 0) { StatisticType type; (void) SyncImageSettings(mogrify_info,*image,exception); type=(StatisticType) ParseCommandOption(MagickStatisticOptions, MagickFalse,argv[i+1]); (void) ParseGeometry(argv[i+2],&geometry_info); mogrify_image=StatisticImage(*image,type,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("stretch",option+1) == 0) { if (*option == '+') { draw_info->stretch=UndefinedStretch; break; } draw_info->stretch=(StretchType) ParseCommandOption( MagickStretchOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("strip",option+1) == 0) { /* Strip image of profiles and comments. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) StripImage(*image,exception); break; } if (LocaleCompare("stroke",option+1) == 0) { ExceptionInfo *sans; PixelInfo color; if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance, &draw_info->stroke,exception); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage( draw_info->stroke_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1], exception); else draw_info->stroke=color; break; } if (LocaleCompare("strokewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { draw_info->style=UndefinedStyle; break; } draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("swirl",option+1) == 0) { /* Swirl image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SwirlImage(*image,geometry_info.rho, interpolate_method,exception); break; } break; } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') threshold=(double) QuantumRange/2; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) BilevelImage(*image,threshold,exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { /* Thumbnail image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') { if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("tint",option+1) == 0) { /* Tint the image. 
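The argument sets the blend amount; the current fill color supplies the tint.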
*/ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TintImage(*image,argv[i+1],&fill,exception); break; } if (LocaleCompare("transform",option+1) == 0) { /* Affine transform image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=AffineTransformImage(*image,&draw_info->affine, exception); break; } if (LocaleCompare("transparent",option+1) == 0) { PixelInfo target; (void) SyncImageSettings(mogrify_info,*image,exception); (void) QueryColorCompliance(argv[i+1],AllCompliance,&target, exception); (void) TransparentPaintImage(*image,&target,(Quantum) TransparentAlpha,*option == '-' ? MagickFalse : MagickTrue, exception); break; } if (LocaleCompare("transpose",option+1) == 0) { /* Transpose image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TransposeImage(*image,exception); break; } if (LocaleCompare("transverse",option+1) == 0) { /* Transverse image scanlines. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TransverseImage(*image,exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("trim",option+1) == 0) { /* Trim image. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=TrimImage(*image,exception); break; } if (LocaleCompare("type",option+1) == 0) { ImageType type; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') type=UndefinedType; else type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, argv[i+1]); (*image)->type=UndefinedType; (void) SetImageType(*image,type,exception); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) QueryColorCompliance(argv[i+1],AllCompliance, &draw_info->undercolor,exception); break; } if (LocaleCompare("unique",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:unique-colors"); break; } (void) SetImageArtifact(*image,"identify:unique-colors","true"); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { /* Unique image colors. */ (void) SyncImageSettings(mogrify_info,*image,exception); mogrify_image=UniqueImageColors(*image,exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { /* Unsharp mask image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; mogrify_image=UnsharpMaskImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi, exception); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { (void) SetImageArtifact(*image,option+1, *option == '+' ? "false" : "true"); break; } if (LocaleCompare("vignette",option+1) == 0) { /* Vignette image. 
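Rho and sigma control the blur, while xi and psi, defaulting to 10% of the image width and height, give the x and y offsets of the vignette.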
*/ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*(*image)->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*(*image)->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) (*image)->columns/100.0; geometry_info.psi*=(double) (*image)->rows/100.0; } mogrify_image=VignetteImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5), (ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { (void) SetImageVirtualPixelMethod(*image, UndefinedVirtualPixelMethod,exception); break; } (void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1]),exception); break; } break; } case 'w': { if (LocaleCompare("wave",option+1) == 0) { /* Wave image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=WaveImage(*image,geometry_info.rho, geometry_info.sigma,interpolate_method,exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { /* Wavelet denoise image. */ (void) SyncImageSettings(mogrify_info,*image,exception); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse, argv[i+1]); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(argv[i+1]); draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-threshold",option+1) == 0) { /* White threshold image. */ (void) SyncImageSettings(mogrify_info,*image,exception); (void) WhiteThresholdImage(*image,argv[i+1],exception); break; } if (LocaleCompare("write-mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image,exception); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,WritePixelMask,(Image *) NULL, exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,WritePixelMask,mask,exception); mask=DestroyImage(mask); break; } break; } default: break; } /* Replace current image with any image that was generated */ if (mogrify_image != (Image *) NULL) ReplaceImageInListReturnLast(image,mogrify_image); i+=count; } /* Free resources. */ quantize_info=DestroyQuantizeInfo(quantize_info); draw_info=DestroyDrawInfo(draw_info); mogrify_info=DestroyImageInfo(mogrify_info); status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageCommand() transforms an image or a sequence of images. These % transforms include image scaling, image rotation, color reduction, and % others. 
The transmogrified image overwrites the original image. % % The format of the MogrifyImageCommand method is: % % MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc, % const char **argv,char **metadata,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o argc: the number of elements in the argument vector. % % o argv: A text array containing the command line arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType MogrifyUsage(void) { static const char channel_operators[] = " -channel-fx expression\n" " exchange, extract, or transfer one or more image channels\n" " -separate separate an image channel into a grayscale image", miscellaneous[] = " -debug events display copious debugging information\n" " -distribute-cache port\n" " distributed pixel cache spanning one or more servers\n" " -help print program options\n" " -list type print a list of supported option arguments\n" " -log format format of debugging information\n" " -version print version information", operators[] = " -adaptive-blur geometry\n" " adaptively blur pixels; decrease effect near edges\n" " -adaptive-resize geometry\n" " adaptively resize image using 'mesh' interpolation\n" " -adaptive-sharpen geometry\n" " adaptively sharpen pixels; increase effect near edges\n" " -alpha option on, activate, off, deactivate, set, opaque, copy\n" " transparent, extract, background, or shape\n" " -annotate geometry text\n" " annotate the image with text\n" " -auto-gamma automagically adjust gamma level of image\n" " -auto-level automagically adjust color levels of image\n" " -auto-orient automagically orient (rotate) image\n" " -auto-threshold method\n" " automatically perform image thresholding\n" " -bench iterations measure performance\n" " -black-threshold value\n" " force all pixels below the threshold into black\n" " -blue-shift simulate a scene at nighttime in the moonlight\n" " -blur geometry reduce image noise and reduce detail levels\n" " -border geometry surround image with a border of color\n" " -bordercolor color border color\n" " -brightness-contrast geometry\n" " improve brightness / contrast of the image\n" " -canny geometry detect edges in the image\n" " -cdl filename color correct with a color decision list\n" " -channel mask set the image channel mask\n" " -charcoal geometry simulate a charcoal drawing\n" " -chop geometry remove pixels from the image interior\n" " -clahe geometry contrast limited adaptive histogram equalization\n" " -clamp keep pixel values in range (0-QuantumRange)\n" " -clip clip along the first path from the 8BIM profile\n" " -clip-mask filename associate a clip mask with the image\n" " -clip-path id clip along a named path from the 8BIM profile\n" " -colorize value colorize the image with the fill color\n" " -color-matrix matrix apply color correction to the image\n" " -connected-components connectivity\n" " connected-components uniquely labeled\n" " -contrast enhance or reduce the image contrast\n" " -contrast-stretch geometry\n" " improve contrast by 'stretching' the intensity range\n" " -convolve coefficients\n" " apply a convolution kernel to the image\n" " -cycle amount cycle the image colormap\n" " -decipher filename convert cipher pixels to plain pixels\n" " -deskew threshold straighten an image\n" " -despeckle reduce the speckles within an image\n" " -distort method args\n" " distort images according to given method ad 
args\n" " -draw string annotate the image with a graphic primitive\n" " -edge radius apply a filter to detect edges in the image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -emboss radius emboss an image\n" " -enhance apply a digital filter to enhance a noisy image\n" " -equalize perform histogram equalization to an image\n" " -evaluate operator value\n" " evaluate an arithmetic, relational, or logical expression\n" " -extent geometry set the image size\n" " -extract geometry extract area from image\n" " -fft implements the discrete Fourier transform (DFT)\n" " -flip flip image vertically\n" " -floodfill geometry color\n" " floodfill the image with color\n" " -flop flop image horizontally\n" " -frame geometry surround image with an ornamental border\n" " -function name parameters\n" " apply function over image values\n" " -gamma value level of gamma correction\n" " -gaussian-blur geometry\n" " reduce image noise and reduce detail levels\n" " -geometry geometry preferred size or location of the image\n" " -grayscale method convert image to grayscale\n" " -hough-lines geometry\n" " identify lines in the image\n" " -identify identify the format and characteristics of the image\n" " -ift implements the inverse discrete Fourier transform (DFT)\n" " -implode amount implode image pixels about the center\n" " -interpolative-resize geometry\n" " resize image using interpolation\n" " -kuwahara geometry edge preserving noise reduction filter\n" " -lat geometry local adaptive thresholding\n" " -level value adjust the level of image contrast\n" " -level-colors color,color\n" " level image with the given colors\n" " -linear-stretch geometry\n" " improve contrast by 'stretching with saturation'\n" " -liquid-rescale geometry\n" " rescale image with seam-carving\n" " -local-contrast geometry\n" " enhance local contrast\n" " -magnify double the size of the image with pixel art scaling\n" " -mean-shift geometry delineate arbitrarily shaped clusters in the image\n" " -median geometry apply a median filter to the image\n" " -mode geometry make each pixel the 'predominant color' of the\n" " neighborhood\n" " -modulate value vary the brightness, saturation, and hue\n" " -monochrome transform image to black and white\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -motion-blur geometry\n" " simulate motion blur\n" " -negate replace every pixel with its complementary color \n" " -noise geometry add or reduce noise in an image\n" " -normalize transform image to span the full range of colors\n" " -opaque color change this color to the fill color\n" " -ordered-dither NxN\n" " add a noise pattern to the image with specific\n" " amplitudes\n" " -paint radius simulate an oil painting\n" " -perceptible epsilon\n" " pixel value less than |epsilon| become epsilon or\n" " -epsilon\n" " -polaroid angle simulate a Polaroid picture\n" " -posterize levels reduce the image to a limited number of color levels\n" " -profile filename add, delete, or apply an image profile\n" " -quantize colorspace reduce colors in this colorspace\n" " -raise value lighten/darken image edges to create a 3-D effect\n" " -random-threshold low,high\n" " random threshold the image\n" " -range-threshold values\n" " perform either hard or soft thresholding within some range of values in an image\n" " -region geometry apply options to a portion of the image\n" " -render render vector graphics\n" " -repage geometry size and location of an image canvas\n" " -resample geometry change the resolution of an 
image\n" " -resize geometry resize the image\n" " -roll geometry roll an image vertically or horizontally\n" " -rotate degrees apply Paeth rotation to the image\n" " -rotational-blur angle\n" " rotational blur the image\n" " -sample geometry scale image with pixel sampling\n" " -scale geometry scale the image\n" " -segment values segment an image\n" " -selective-blur geometry\n" " selectively blur pixels within a contrast threshold\n" " -sepia-tone threshold\n" " simulate a sepia-toned photo\n" " -set property value set an image property\n" " -shade degrees shade the image using a distant light source\n" " -shadow geometry simulate an image shadow\n" " -sharpen geometry sharpen the image\n" " -shave geometry shave pixels from the image edges\n" " -shear geometry slide one edge of the image along the X or Y axis\n" " -sigmoidal-contrast geometry\n" " increase the contrast without saturating highlights or\n" " shadows\n" " -sketch geometry simulate a pencil sketch\n" " -solarize threshold negate all pixels above the threshold level\n" " -sparse-color method args\n" " fill in a image based on a few color points\n" " -splice geometry splice the background color into the image\n" " -spread radius displace image pixels by a random amount\n" " -statistic type radius\n" " replace each pixel with corresponding statistic from the neighborhood\n" " -strip strip image of all profiles and comments\n" " -swirl degrees swirl image pixels about the center\n" " -threshold value threshold the image\n" " -thumbnail geometry create a thumbnail of the image\n" " -tile filename tile image when filling a graphic primitive\n" " -tint value tint the image with the fill color\n" " -transform affine transform image\n" " -transparent color make this color transparent within the image\n" " -transpose flip image vertically and rotate 90 degrees\n" " -transverse flop image horizontally and rotate 270 degrees\n" " -trim trim image edges\n" " -type type image type\n" " -unique-colors discard all but one of any pixel color\n" " -unsharp geometry sharpen the image\n" " -vignette geometry soften the edges of the image in vignette style\n" " -wave geometry alter an image along a sine wave\n" " -wavelet-denoise threshold\n" " removes noise from the image using a wavelet transform\n" " -white-threshold value\n" " force all pixels above the threshold into white", sequence_operators[] = " -affinity filename transform image colors to match this set of colors\n" " -append append an image sequence\n" " -clut apply a color lookup table to the image\n" " -coalesce merge a sequence of images\n" " -combine combine a sequence of images\n" " -compare mathematically and visually annotate the difference between an image and its reconstruction\n" " -complex operator perform complex mathematics on an image sequence\n" " -composite composite image\n" " -copy geometry offset\n" " copy pixels from one area of an image to another\n" " -crop geometry cut out a rectangular region of the image\n" " -deconstruct break down an image sequence into constituent parts\n" " -evaluate-sequence operator\n" " evaluate an arithmetic, relational, or logical expression\n" " -flatten flatten a sequence of images\n" " -fx expression apply mathematical expression to an image channel(s)\n" " -hald-clut apply a Hald color lookup table to the image\n" " -layers method optimize, merge, or compare image layers\n" " -morph value morph an image sequence\n" " -mosaic create a mosaic from an image sequence\n" " -poly terms build a polynomial from the image sequence and the 
corresponding\n" " terms (coefficients and degree pairs).\n" " -print string interpret string and print to console\n" " -process arguments process the image with a custom image filter\n" " -smush geometry smush an image sequence together\n" " -write filename write images to this file", settings[] = " -adjoin join images into a single multi-image file\n" " -affine matrix affine transform matrix\n" " -alpha option activate, deactivate, reset, or set the alpha channel\n" " -antialias remove pixel-aliasing\n" " -authenticate password\n" " decipher image with this password\n" " -attenuate value lessen (or intensify) when adding noise to an image\n" " -background color background color\n" " -bias value add bias when convolving an image\n" " -black-point-compensation\n" " use black point compensation\n" " -blue-primary point chromaticity blue primary point\n" " -bordercolor color border color\n" " -caption string assign a caption to an image\n" " -colors value preferred number of colors in the image\n" " -colorspace type alternate image colorspace\n" " -comment string annotate image with comment\n" " -compose operator set image composite operator\n" " -compress type type of pixel compression when writing the image\n" " -define format:option=value\n" " define one or more image format options\n" " -delay value display the next image after pausing\n" " -density geometry horizontal and vertical density of the image\n" " -depth value image depth\n" " -direction type render text right-to-left or left-to-right\n" " -display server get image or font from this X server\n" " -dispose method layer disposal method\n" " -dither method apply error diffusion to image\n" " -encoding type text encoding type\n" " -endian type endianness (MSB or LSB) of the image\n" " -family name render text with this font family\n" " -features distance analyze image features (e.g. 
contrast, correlation)\n" " -fill color color to use when filling a graphic primitive\n" " -filter type use this filter when resizing an image\n" " -font name render text with this font\n" " -format \"string\" output formatted image characteristics\n" " -fuzz distance colors within this distance are considered equal\n" " -gravity type horizontal and vertical text placement\n" " -green-primary point chromaticity green primary point\n" " -intensity method method to generate an intensity value from a pixel\n" " -intent type type of rendering intent when managing the image color\n" " -interlace type type of image interlacing scheme\n" " -interline-spacing value\n" " set the space between two text lines\n" " -interpolate method pixel color interpolation method\n" " -interword-spacing value\n" " set the space between two words\n" " -kerning value set the space between two letters\n" " -label string assign a label to an image\n" " -limit type value pixel cache resource limit\n" " -loop iterations add Netscape loop extension to your GIF animation\n" " -matte store matte channel if the image has one\n" " -mattecolor color frame color\n" " -monitor monitor progress\n" " -orient type image orientation\n" " -page geometry size and location of an image canvas (setting)\n" " -path path write images to this path on disk\n" " -ping efficiently determine image attributes\n" " -pointsize value font point size\n" " -precision value maximum number of significant digits to print\n" " -preview type image preview type\n" " -quality value JPEG/MIFF/PNG compression level\n" " -quiet suppress all warning messages\n" " -read-mask filename associate a read mask with the image\n" " -red-primary point chromaticity red primary point\n" " -regard-warnings pay attention to warning messages\n" " -remap filename transform image colors to match this set of colors\n" " -respect-parentheses settings remain in effect until parenthesis boundary\n" " -sampling-factor geometry\n" " horizontal and vertical sampling factor\n" " -scene value image scene number\n" " -seed value seed a new sequence of pseudo-random numbers\n" " -size geometry width and height of image\n" " -stretch type render text with this font stretch\n" " -stroke color graphic primitive stroke color\n" " -strokewidth value graphic primitive stroke width\n" " -style type render text with this font style\n" " -synchronize synchronize image to storage device\n" " -taint declare the image as modified\n" " -texture filename name of texture to tile onto the image background\n" " -tile-offset geometry\n" " tile offset\n" " -treedepth value color tree depth\n" " -transparent-color color\n" " transparent color\n" " -undercolor color annotation bounding box color\n" " -units type the units of image resolution\n" " -verbose print detailed information about the image\n" " -view FlashPix viewing transforms\n" " -virtual-pixel method\n" " virtual pixel access method\n" " -weight type render text with this font weight\n" " -white-point point chromaticity white point\n" " -write-mask filename associate a write mask with the image", stack_operators[] = " -delete indexes delete the image from the image sequence\n" " -duplicate count,indexes\n" " duplicate an image one or more times\n" " -insert index insert last image into the image sequence\n" " -reverse reverse image sequence\n" " -swap indexes swap two images in the image sequence"; ListMagickVersion(stdout); (void) printf("Usage: %s [options ...] file [ [options ...] 
file ...]\n", GetClientName()); (void) printf("\nImage Settings:\n"); (void) puts(settings); (void) printf("\nImage Operators:\n"); (void) puts(operators); (void) printf("\nImage Channel Operators:\n"); (void) puts(channel_operators); (void) printf("\nImage Sequence Operators:\n"); (void) puts(sequence_operators); (void) printf("\nImage Stack Operators:\n"); (void) puts(stack_operators); (void) printf("\nMiscellaneous Options:\n"); (void) puts(miscellaneous); (void) printf( "\nBy default, the image format of 'file' is determined by its magic\n"); (void) printf( "number. To specify a particular image format, precede the filename\n"); (void) printf( "with an image format name and a colon (i.e. ps:image) or specify the\n"); (void) printf( "image type as the filename suffix (i.e. image.ps). Specify 'file' as\n"); (void) printf("'-' for standard input or output.\n"); return(MagickFalse); } WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info, int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception) { #define DestroyMogrify() \ { \ if (format != (char *) NULL) \ format=DestroyString(format); \ if (path != (char *) NULL) \ path=DestroyString(path); \ DestroyImageStack(); \ for (i=0; i < (ssize_t) argc; i++) \ argv[i]=DestroyString(argv[i]); \ argv=(char **) RelinquishMagickMemory(argv); \ } #define ThrowMogrifyException(asperity,tag,option) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \ option); \ DestroyMogrify(); \ return(MagickFalse); \ } #define ThrowMogrifyInvalidArgumentException(option,argument) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),OptionError, \ "InvalidArgument","'%s': %s",argument,option); \ DestroyMogrify(); \ return(MagickFalse); \ } char *format, *option, *path; Image *image; ImageStack image_stack[MaxImageStackDepth+1]; MagickBooleanType global_colormap; MagickBooleanType fire, pend, respect_parenthesis; MagickStatusType status; register ssize_t i; ssize_t j, k; wand_unreferenced(metadata); /* Set defaults. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(exception != (ExceptionInfo *) NULL); if (argc == 2) { option=argv[1]; if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); return(MagickTrue); } } if (argc < 2) return(MogrifyUsage()); format=(char *) NULL; path=(char *) NULL; global_colormap=MagickFalse; k=0; j=1; NewImageStack(); option=(char *) NULL; pend=MagickFalse; respect_parenthesis=MagickFalse; status=MagickTrue; /* Parse command line. 
*/ ReadCommandlLine(argc,&argv); status=ExpandFilenames(&argc,&argv); if (status == MagickFalse) ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed", GetExceptionMessage(errno)); for (i=1; i < (ssize_t) argc; i++) { option=argv[i]; if (LocaleCompare(option,"(") == 0) { FireImageStack(MagickFalse,MagickTrue,pend); if (k == MaxImageStackDepth) ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply", option); PushImageStack(); continue; } if (LocaleCompare(option,")") == 0) { FireImageStack(MagickFalse,MagickTrue,MagickTrue); if (k == 0) ThrowMogrifyException(OptionError,"UnableToParseExpression",option); PopImageStack(); continue; } if (IsCommandOption(option) == MagickFalse) { char backup_filename[MagickPathExtent], *filename; Image *images; struct stat properties; /* Option is a file name: begin by reading image from specified file. */ FireImageStack(MagickFalse,MagickFalse,pend); filename=argv[i]; if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1))) filename=argv[++i]; images=ReadImages(image_info,filename,exception); status&=(images != (Image *) NULL) && (exception->severity < ErrorException); if (images == (Image *) NULL) continue; properties=(*GetBlobProperties(images)); if (format != (char *) NULL) (void) CopyMagickString(images->filename,images->magick_filename, MagickPathExtent); if (path != (char *) NULL) { GetPathComponent(option,TailPath,filename); (void) FormatLocaleString(images->filename,MagickPathExtent, "%s%c%s",path,*DirectorySeparator,filename); } if (format != (char *) NULL) AppendImageFormat(format,images->filename); AppendImageStack(images); FinalizeImageSettings(image_info,image,MagickFalse); if (global_colormap != MagickFalse) { QuantizeInfo *quantize_info; quantize_info=AcquireQuantizeInfo(image_info); (void) RemapImages(quantize_info,images,(Image *) NULL,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } *backup_filename='\0'; if ((LocaleCompare(image->filename,"-") != 0) && (IsPathWritable(image->filename) != MagickFalse)) { /* Rename image file as backup. */ (void) CopyMagickString(backup_filename,image->filename, MagickPathExtent); for (j=0; j < 6; j++) { (void) ConcatenateMagickString(backup_filename,"~", MagickPathExtent); if (IsPathAccessible(backup_filename) == MagickFalse) break; } if ((IsPathAccessible(backup_filename) != MagickFalse) || (rename_utf8(image->filename,backup_filename) != 0)) *backup_filename='\0'; } /* Write transmogrified image to disk. */ image_info->synchronize=MagickTrue; status&=WriteImages(image_info,image,image->filename,exception); if (status != MagickFalse) { #if defined(MAGICKCORE_HAVE_UTIME) { MagickBooleanType preserve_timestamp; preserve_timestamp=IsStringTrue(GetImageOption(image_info, "preserve-timestamp")); if (preserve_timestamp != MagickFalse) { struct utimbuf timestamp; timestamp.actime=properties.st_atime; timestamp.modtime=properties.st_mtime; (void) utime(image->filename,&timestamp); } } #endif if (*backup_filename != '\0') (void) remove_utf8(backup_filename); } RemoveAllImageStack(); continue; } pend=image != (Image *) NULL ? 
MagickTrue : MagickFalse; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("affine",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("alpha",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse, argv[i]); if (type < 0) ThrowMogrifyException(OptionError, "UnrecognizedAlphaChannelOption",argv[i]); break; } if (LocaleCompare("annotate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); i++; break; } if (LocaleCompare("antialias",option+1) == 0) break; if (LocaleCompare("append",option+1) == 0) break; if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("auto-gamma",option+1) == 0) break; if (LocaleCompare("auto-level",option+1) == 0) break; if (LocaleCompare("auto-orient",option+1) == 0) break; if (LocaleCompare("auto-threshold",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickAutoThresholdOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedThresholdMethod", argv[i]); break; } if (LocaleCompare("average",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) break; if (LocaleCompare("black-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-shift",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("border",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'c': { if (LocaleCompare("cache",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("canny",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("channel",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParseChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("channel-fx",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParsePixelChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("cdl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("charcoal",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("chop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clahe",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clamp",option+1) == 0) break; if (LocaleCompare("clip",option+1) == 0) break; if (LocaleCompare("clip-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("clut",option+1) == 0) break; if (LocaleCompare("coalesce",option+1) == 0) break; if (LocaleCompare("colorize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("colors",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("colorspace",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("combine",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("compare",option+1) == 0) break; if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("composite",option+1) == 0) break; if (LocaleCompare("compress",option+1) == 0) { ssize_t compress; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); compress=ParseCommandOption(MagickCompressOptions,MagickFalse, argv[i]); if (compress < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageCompression", argv[i]); break; } if (LocaleCompare("concurrent",option+1) == 0) break; if (LocaleCompare("connected-components",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("contrast",option+1) == 0) break; if (LocaleCompare("contrast-stretch",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("convolve",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("copy",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("crop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("cycle",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("deconstruct",option+1) == 0) break; if (LocaleCompare("debug",option+1) == 0) { ssize_t event; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]); if (event < 0) ThrowMogrifyException(OptionError,"UnrecognizedEventType", argv[i]); (void) SetLogEventMask(argv[i]); break; } if (LocaleCompare("define",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { const char *define; define=GetImageOption(image_info,argv[i]); if (define == (const char *) NULL) ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]); break; } break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("density",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if 
(LocaleCompare("depth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("deskew",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("despeckle",option+1) == 0) break; if (LocaleCompare("dft",option+1) == 0) break; if (LocaleCompare("direction",option+1) == 0) { ssize_t direction; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, argv[i]); if (direction < 0) ThrowMogrifyException(OptionError,"UnrecognizedDirectionType", argv[i]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dispose",option+1) == 0) { ssize_t dispose; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, argv[i]); if (dispose < 0) ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod", argv[i]); break; } if (LocaleCompare("distort",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dither",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod", argv[i]); break; } if (LocaleCompare("draw",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("duration",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'e': { if (LocaleCompare("edge",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("emboss",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("encipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("endian",option+1) == 0) { ssize_t endian; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]); if (endian < 0) ThrowMogrifyException(OptionError,"UnrecognizedEndianType", argv[i]); break; } if (LocaleCompare("enhance",option+1) == 0) break; if (LocaleCompare("equalize",option+1) == 0) break; if (LocaleCompare("evaluate",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("evaluate-sequence",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); break; } if (LocaleCompare("extent",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("extract",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("filter",option+1) == 0) { ssize_t filter; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]); if (filter < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageFilter", argv[i]); break; } if (LocaleCompare("flatten",option+1) == 0) break; if (LocaleCompare("flip",option+1) == 0) break; if (LocaleCompare("flop",option+1) == 0) break; if (LocaleCompare("floodfill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("format",option+1) == 0) { (void) CopyMagickString(argv[i]+1,"sans",MagickPathExtent); (void) CloneString(&format,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&format,argv[i]); (void) CopyMagickString(image_info->filename,format, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,":", MagickPathExtent); (void) SetImageInfo(image_info,0,exception); if (*image_info->magick == '\0') ThrowMogrifyException(OptionError,"UnrecognizedImageFormat", format); break; } if (LocaleCompare("frame",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("function",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fx",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("geometry",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("gravity",option+1) == 0) { ssize_t gravity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse, argv[i]); if (gravity < 0) ThrowMogrifyException(OptionError,"UnrecognizedGravityType", argv[i]); break; } if (LocaleCompare("grayscale",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod", argv[i]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) break; if ((LocaleCompare("help",option+1) == 0) || (LocaleCompare("-help",option+1) == 0)) return(MogrifyUsage()); if (LocaleCompare("hough-lines",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'i': { if (LocaleCompare("identify",option+1) == 0) break; if (LocaleCompare("idft",option+1) == 0) break; if (LocaleCompare("implode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("intensity",option+1) == 0) { ssize_t intensity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,argv[i]); if (intensity < 0) ThrowMogrifyException(OptionError, "UnrecognizedPixelIntensityMethod",argv[i]); break; } if (LocaleCompare("intent",option+1) == 0) { ssize_t intent; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]); if (intent < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntentType", argv[i]); break; } if (LocaleCompare("interlace",option+1) == 0) { ssize_t interlace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse, argv[i]); if (interlace < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType", argv[i]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("interpolate",option+1) == 0) { ssize_t interpolate; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse, argv[i]); if (interpolate < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod", argv[i]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'k': { if 
(LocaleCompare("kerning",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("kuwahara",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'l': { if (LocaleCompare("label",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("lat",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("layers",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod", argv[i]); break; } if (LocaleCompare("level",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("level-colors",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("limit",option+1) == 0) { char *p; double value; ssize_t resource; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); resource=ParseCommandOption(MagickResourceOptions,MagickFalse, argv[i]); if (resource < 0) ThrowMogrifyException(OptionError,"UnrecognizedResourceType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); value=StringToDouble(argv[i],&p); (void) value; if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0)) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]); if (list < 0) ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]); status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **) argv+j,exception); return(status == 0 ? 
MagickFalse : MagickTrue); } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; i++; if ((i == (ssize_t) argc) || (strchr(argv[i],'%') == (char *) NULL)) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'm': { if (LocaleCompare("magnify",option+1) == 0) break; if (LocaleCompare("map",option+1) == 0) { global_colormap=(*option == '+') ? MagickTrue : MagickFalse; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("matte",option+1) == 0) break; if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("maximum",option+1) == 0) break; if (LocaleCompare("mean-shift",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("median",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("metric",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedMetricType", argv[i]); break; } if (LocaleCompare("minimum",option+1) == 0) break; if (LocaleCompare("modulate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("mode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("monitor",option+1) == 0) break; if (LocaleCompare("monochrome",option+1) == 0) break; if (LocaleCompare("morph",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; KernelInfo *kernel_info; ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); GetNextToken(argv[i],(const char **) NULL,MagickPathExtent,token); op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod", 
token); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i],exception); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("mosaic",option+1) == 0) break; if (LocaleCompare("motion-blur",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'n': { if (LocaleCompare("negate",option+1) == 0) break; if (LocaleCompare("noise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { ssize_t noise; noise=ParseCommandOption(MagickNoiseOptions,MagickFalse, argv[i]); if (noise < 0) ThrowMogrifyException(OptionError,"UnrecognizedNoiseType", argv[i]); break; } if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("noop",option+1) == 0) break; if (LocaleCompare("normalize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("orient",option+1) == 0) { ssize_t orientation; orientation=UndefinedOrientation; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse, argv[i]); if (orientation < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'p': { if (LocaleCompare("page",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("paint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("path",option+1) == 0) { (void) CloneString(&path,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&path,argv[i]); break; } if (LocaleCompare("perceptible",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("polaroid",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("poly",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("posterize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("precision",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("print",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("process",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("profile",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("quantize",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("quiet",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'r': { if (LocaleCompare("rotational-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("raise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("random-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("range-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("read-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("regard-warnings",option+1) == 0) break; if (LocaleCompare("region",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("remap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("render",option+1) == 0) break; if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleNCompare("respect-parentheses",option+1,17) == 0) { respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("reverse",option+1) == 0) break; if (LocaleCompare("roll",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("rotate",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 's': { if (LocaleCompare("sample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sampling-factor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scale",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scene",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("seed",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("segment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("selective-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("separate",option+1) == 0) break; if (LocaleCompare("sepia-tone",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("set",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("shade",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shadow",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shave",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shear",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sketch",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("smush",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; break; } if (LocaleCompare("solarize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sparse-color",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("splice",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("spread",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("statistic",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedStatisticType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("stretch",option+1) == 0) { ssize_t stretch; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse, argv[i]); if (stretch < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("strip",option+1) == 0) break; if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("style",option+1) == 0) { ssize_t style; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]); if (style < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("swap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("swirl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("synchronize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 't': { if (LocaleCompare("taint",option+1) == 0) break; if (LocaleCompare("texture",option+1) 
== 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("tint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transform",option+1) == 0) break; if (LocaleCompare("transpose",option+1) == 0) break; if (LocaleCompare("transverse",option+1) == 0) break; if (LocaleCompare("threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transparent",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("treedepth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("trim",option+1) == 0) break; if (LocaleCompare("type",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageType", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("unique-colors",option+1) == 0) break; if (LocaleCompare("units",option+1) == 0) { ssize_t units; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); units=ParseCommandOption(MagickResolutionOptions,MagickFalse, argv[i]); if (units < 0) ThrowMogrifyException(OptionError,"UnrecognizedUnitsType", argv[i]); break; } if (LocaleCompare("unsharp",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { 
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse; break; } if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); break; } if (LocaleCompare("vignette",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError, "UnrecognizedVirtualPixelMethod",argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'w': { if (LocaleCompare("wave",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("write",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("write-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case '?': break; default: ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) & FireOptionFlag) == 0 ? MagickFalse : MagickTrue; if (fire != MagickFalse) FireImageStack(MagickFalse,MagickTrue,MagickTrue); } if (k != 0) ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]); if (i != (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]); DestroyMogrify(); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageInfo() applies image processing settings to the image as % prescribed by command line options. 
% % The format of the MogrifyImageInfo method is: % % MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc, % const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info, const int argc,const char **argv,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; ssize_t count; register ssize_t i; /* Initialize method variables. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (argc < 0) return(MagickTrue); /* Set the image settings. */ for (i=0; i < (ssize_t) argc; i++) { option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("antialias",option+1) == 0) { image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorCompliance(MogrifyBackgroundColor, AllCompliance,&image_info->background_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->background_color,exception); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"convolve:bias","0.0"); break; } (void) SetImageOption(image_info,"convolve:bias",argv[i+1]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &image_info->border_color,exception); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->border_color,exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"undercolor","none"); break; } (void) SetImageOption(image_info,"undercolor",argv[i+1]); break; } break; } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType limit; limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+1]) != 0) limit=(MagickSizeType) 
SiPrefixToDoubleInterval(argv[i+1], 100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("colorspace",option+1) == 0) { if (*option == '+') { image_info->colorspace=UndefinedColorspace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compress",option+1) == 0) { if (*option == '+') { image_info->compression=UndefinedCompression; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'd': { if (LocaleCompare("debug",option+1) == 0) { if (*option == '+') (void) SetLogEventMask("none"); else (void) SetLogEventMask(argv[i+1]); image_info->debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else (void) DeleteImageOption(image_info,argv[i+1]); break; } if (LocaleNCompare(argv[i+1],"registry:",9) == 0) { (void) DefineImageRegistry(StringRegistryType,argv[i+1]+9, exception); break; } (void) DefineImageOption(image_info,argv[i+1]); break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. 
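The '+' form discards any density string already attached to image_info and records the default of 72 as the density image option; the '-' form clones the argument into image_info->density and mirrors it as an image option.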
*/ if (*option == '+') { if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); (void) SetImageOption(image_info,option+1,"72"); break; } (void) CloneString(&image_info->density,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') { image_info->depth=MAGICKCORE_QUANTUM_DEPTH; break; } image_info->depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("direction",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') { if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); break; } (void) CloneString(&image_info->server_name,argv[i+1]); break; } if (LocaleCompare("dispose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { image_info->dither=MagickFalse; (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); image_info->dither=MagickTrue; break; } break; } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("endian",option+1) == 0) { if (*option == '+') { image_info->endian=UndefinedEndian; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->endian=(EndianType) ParseCommandOption( MagickEndianOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("extract",option+1) == 0) { /* Set image extract geometry. 
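The '+' form simply frees any previous extract geometry; the '-' form clones the argument into image_info->extract.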
*/ if (*option == '+') { if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); break; } (void) CloneString(&image_info->extract,argv[i+1]); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option != '+') (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("filter",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); break; } (void) CloneString(&image_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { register const char *q; for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) image_info->ping=MagickFalse; (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') { image_info->fuzz=0.0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->fuzz=StringToDoubleInterval(argv[i+1],(double) QuantumRange+1.0); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("intent",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interlace",option+1) == 0) { if (*option == '+') { image_info->interlace=UndefinedInterlace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->interlace=(InterlaceType) ParseCommandOption( MagickInterlaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interpolate",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'l': { if 
(LocaleCompare("label",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; ResourceType type; if (*option == '+') break; type=(ResourceType) ParseCommandOption(MagickResourceOptions, MagickFalse,argv[i+1]); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+2]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0); (void) SetMagickResourceLimit(type,limit); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; /* Display configuration list. */ list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]); switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,exception); break; } case MagickFormatOptions: { (void) ListMagickInfo((FILE *) NULL,exception); break; } case MagickLocaleOptions: { (void) ListLocaleInfo((FILE *) NULL,exception); break; } case MagickLogOptions: { (void) ListLogInfo((FILE *) NULL,exception); break; } case MagickMagicOptions: { (void) ListMagicInfo((FILE *) NULL,exception); break; } case MagickMimeOptions: { (void) ListMimeInfo((FILE *) NULL,exception); break; } case MagickModuleOptions: { (void) ListModuleInfo((FILE *) NULL,exception); break; } case MagickPolicyOptions: { (void) ListPolicyInfo((FILE *) NULL,exception); break; } case MagickResourceOptions: { (void) ListMagickResourceInfo((FILE *) NULL,exception); break; } case MagickThresholdOptions: { (void) ListThresholdMaps((FILE *) NULL,exception); break; } default: { (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, exception); break; } } break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; (void) SetLogFormat(argv[i+1]); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("matte",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(MogrifyAlphaColor,AllCompliance, &image_info->matte_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->matte_color,exception); break; } if (LocaleCompare("metric",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(image_info,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { image_info->monochrome=(*option == '-') ? 
MagickTrue : MagickFalse; break; } break; } case 'o': { if (LocaleCompare("orient",option+1) == 0) { if (*option == '+') { image_info->orientation=UndefinedOrientation; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } } case 'p': { if (LocaleCompare("page",option+1) == 0) { char *canonical_page, page[MagickPathExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) CloneString(&image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(image_info,"page"); if (image_option != (const char *) NULL) flags=ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(argv[i+1]); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(image_info,option+1,page); (void) CloneString(&image_info->page,page); break; } if (LocaleCompare("ping",option+1) == 0) { image_info->ping=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') geometry_info.rho=0.0; else (void) ParseGeometry(argv[i+1],&geometry_info); image_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("precision",option+1) == 0) { (void) SetMagickPrecision(StringToInteger(argv[i+1])); break; } break; } case 'q': { if (LocaleCompare("quality",option+1) == 0) { /* Set image compression quality. */ if (*option == '+') { image_info->quality=UndefinedCompressionQuality; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->quality=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("quiet",option+1) == 0) { static WarningHandler warning_handler = (WarningHandler) NULL; if (*option == '+') { /* Restore error or warning messages. */ warning_handler=SetWarningHandler(warning_handler); break; } /* Suppress error or warning messages. */ warning_handler=SetWarningHandler((WarningHandler) NULL); break; } break; } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* Set image sampling factor. */ if (*option == '+') { if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); break; } (void) CloneString(&image_info->sampling_factor,argv[i+1]); break; } if (LocaleCompare("scene",option+1) == 0) { /* Set image scene. 
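The '+' form resets the scene number to 0; the '-' form parses the argument with StringToUnsignedLong() and stores it both in image_info->scene and as the scene image option.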
*/ if (*option == '+') { image_info->scene=0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->scene=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("seed",option+1) == 0) { unsigned long seed; if (*option == '+') { seed=(unsigned long) time((time_t *) NULL); SetRandomSecretKey(seed); break; } seed=StringToUnsignedLong(argv[i+1]); SetRandomSecretKey(seed); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') { if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); break; } (void) CloneString(&image_info->size,argv[i+1]); break; } if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("synchronize",option+1) == 0) { if (*option == '+') { image_info->synchronize=MagickFalse; break; } image_info->synchronize=MagickTrue; break; } break; } case 't': { if (LocaleCompare("taint",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') { if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); break; } (void) CloneString(&image_info->texture,argv[i+1]); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') { (void) QueryColorCompliance("none",AllCompliance, &image_info->transparent_color,exception); (void) SetImageOption(image_info,option+1,"none"); break; } (void) QueryColorCompliance(argv[i+1],AllCompliance, &image_info->transparent_color,exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("type",option+1) == 0) { if (*option == '+') { image_info->type=UndefinedType; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions, MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') (void) DeleteImageOption(image_info,option+1); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("units",option+1) == 0) { if (*option == '+') { image_info->units=UndefinedResolution; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->units=(ResolutionType) ParseCommandOption( MagickResolutionOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { if (*option == '+') { image_info->verbose=MagickFalse; break; } image_info->verbose=MagickTrue; image_info->ping=MagickFalse; break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') 
(void) SetImageOption(image_info,option+1,"undefined"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'w': { if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0.0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } default: break; } i+=count; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageList() applies any command line options that might affect the % entire image list (e.g. -append, -coalesce, etc.). % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc, % const char **argv,Image **images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info, const int argc,const char **argv,Image **images,ExceptionInfo *exception) { const char *option; ImageInfo *mogrify_info; MagickStatusType status; PixelInterpolateMethod interpolate_method; QuantizeInfo *quantize_info; register ssize_t i; ssize_t count, index; /* Apply options to the image list. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image **) NULL); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); interpolate_method=UndefinedInterpolatePixel; mogrify_info=CloneImageInfo(image_info); quantize_info=AcquireQuantizeInfo(mogrify_info); status=MagickTrue; for (i=0; i < (ssize_t) argc; i++) { if (*images == (Image *) NULL) break; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); switch (*(option+1)) { case 'a': { if (LocaleCompare("affinity",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images,exception); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } i++; break; } if (LocaleCompare("append",option+1) == 0) { Image *append_image; (void) SyncImagesSettings(mogrify_info,*images,exception); append_image=AppendImages(*images,*option == '-' ? MagickTrue : MagickFalse,exception); if (append_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=append_image; break; } if (LocaleCompare("average",option+1) == 0) { Image *average_image; /* Average an image sequence (deprecated). 
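The sequence is reduced with EvaluateImages() and MeanEvaluateOperator, and the resulting mean image replaces the whole list.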
*/ (void) SyncImagesSettings(mogrify_info,*images,exception); average_image=EvaluateImages(*images,MeanEvaluateOperator, exception); if (average_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=average_image; break; } break; } case 'c': { if (LocaleCompare("channel-fx",option+1) == 0) { Image *channel_image; (void) SyncImagesSettings(mogrify_info,*images,exception); channel_image=ChannelFxImage(*images,argv[i+1],exception); if (channel_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=channel_image; break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image, *image; (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); clut_image=RemoveFirstImageFromList(images); if (clut_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); image=DestroyImage(image); status=MagickFalse; break; } (void) ClutImage(image,clut_image,interpolate_method,exception); clut_image=DestroyImage(clut_image); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("coalesce",option+1) == 0) { Image *coalesce_image; (void) SyncImagesSettings(mogrify_info,*images,exception); coalesce_image=CoalesceImages(*images,exception); if (coalesce_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=coalesce_image; break; } if (LocaleCompare("combine",option+1) == 0) { ColorspaceType colorspace; Image *combine_image; (void) SyncImagesSettings(mogrify_info,*images,exception); colorspace=(*images)->colorspace; if ((*images)->number_channels < GetImageListLength(*images)) colorspace=sRGBColorspace; if (*option == '+') colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); combine_image=CombineImages(*images,colorspace,exception); if (combine_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=combine_image; break; } if (LocaleCompare("compare",option+1) == 0) { double distortion; Image *difference_image, *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. 
*/ (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); reconstruct_image=RemoveFirstImageFromList(images); if (reconstruct_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); image=DestroyImage(image); status=MagickFalse; break; } metric=UndefinedErrorMetric; option=GetImageOption(mogrify_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); difference_image=CompareImages(image,reconstruct_image,metric, &distortion,exception); if (difference_image == (Image *) NULL) break; reconstruct_image=DestroyImage(reconstruct_image); image=DestroyImage(image); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=difference_image; break; } if (LocaleCompare("complex",option+1) == 0) { ComplexOperator op; Image *complex_images; (void) SyncImageSettings(mogrify_info,*images,exception); op=(ComplexOperator) ParseCommandOption(MagickComplexOptions, MagickFalse,argv[i+1]); complex_images=ComplexImages(*images,op,exception); if (complex_images == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=complex_images; break; } if (LocaleCompare("composite",option+1) == 0) { CompositeOperator compose; const char* value; MagickBooleanType clip_to_self; Image *mask_image, *new_images, *source_image; RectangleInfo geometry; /* Compose value from "-compose" option only */ (void) SyncImageSettings(mogrify_info,*images,exception); value=GetImageOption(mogrify_info,"compose"); if (value == (const char *) NULL) compose=OverCompositeOp; /* use Over not source_image->compose */ else compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,value); /* Get "clip-to-self" expert setting (false is normal) */ clip_to_self=GetCompositeClipToSelf(compose); value=GetImageOption(mogrify_info,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsStringTrue(value); value=GetImageOption(mogrify_info,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsStringFalse(value); /* deprecated */ new_images=RemoveFirstImageFromList(images); source_image=RemoveFirstImageFromList(images); if (source_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); new_images=DestroyImage(new_images); status=MagickFalse; break; } /* FUTURE: this should not be here! 
- should be part of -geometry */ if (source_image->geometry != (char *) NULL) { RectangleInfo resize_geometry; (void) ParseRegionGeometry(source_image,source_image->geometry, &resize_geometry,exception); if ((source_image->columns != resize_geometry.width) || (source_image->rows != resize_geometry.height)) { Image *resize_image; resize_image=ResizeImage(source_image,resize_geometry.width, resize_geometry.height,source_image->filter,exception); if (resize_image != (Image *) NULL) { source_image=DestroyImage(source_image); source_image=resize_image; } } } SetGeometry(source_image,&geometry); (void) ParseAbsoluteGeometry(source_image->geometry,&geometry); GravityAdjustGeometry(new_images->columns,new_images->rows, new_images->gravity,&geometry); mask_image=RemoveFirstImageFromList(images); if (mask_image == (Image *) NULL) status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); else { if ((compose == DisplaceCompositeOp) || (compose == DistortCompositeOp)) { status&=CompositeImage(source_image,mask_image, CopyGreenCompositeOp,MagickTrue,0,0,exception); status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); } else { Image *clone_image; clone_image=CloneImage(new_images,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) break; status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,exception); status&=CompositeImage(new_images,mask_image, CopyAlphaCompositeOp,MagickTrue,0,0,exception); status&=CompositeImage(clone_image,new_images, OverCompositeOp,clip_to_self,0,0,exception); new_images=DestroyImageList(new_images); new_images=clone_image; } mask_image=DestroyImage(mask_image); } source_image=DestroyImage(source_image); *images=DestroyImageList(*images); *images=new_images; break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
*/ (void) SyncImageSettings(mogrify_info,*images,exception); (void) ParsePageGeometry(*images,argv[i+2],&geometry,exception); offset.x=geometry.x; offset.y=geometry.y; source_image=(*images); if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,argv[i+1],&geometry, exception); status=CopyImagePixels(*images,source_image,&geometry,&offset, exception); break; } break; } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { Image *deconstruct_image; (void) SyncImagesSettings(mogrify_info,*images,exception); deconstruct_image=CompareImagesLayers(*images,CompareAnyLayer, exception); if (deconstruct_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=deconstruct_image; break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') DeleteImages(images,"-1",exception); else DeleteImages(images,argv[i+1],exception); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither_method=NoDitherMethod; break; } quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("duplicate",option+1) == 0) { Image *duplicate_images; if (*option == '+') duplicate_images=DuplicateImages(*images,1,"-1",exception); else { const char *p; size_t number_duplicates; number_duplicates=(size_t) StringToLong(argv[i+1]); p=strchr(argv[i+1],','); if (p == (const char *) NULL) duplicate_images=DuplicateImages(*images,number_duplicates, "-1",exception); else duplicate_images=DuplicateImages(*images,number_duplicates,p, exception); } AppendImageToList(images, duplicate_images); (void) SyncImagesSettings(mogrify_info,*images,exception); break; } break; } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { Image *evaluate_image; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*images,exception); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); evaluate_image=EvaluateImages(*images,op,exception); if (evaluate_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=evaluate_image; break; } break; } case 'f': { if (LocaleCompare("fft",option+1) == 0) { Image *fourier_image; /* Implements the discrete Fourier transform (DFT). */ (void) SyncImageSettings(mogrify_info,*images,exception); fourier_image=ForwardFourierTransformImage(*images,*option == '-' ? 
MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("flatten",option+1) == 0) { Image *flatten_image; (void) SyncImagesSettings(mogrify_info,*images,exception); flatten_image=MergeImageLayers(*images,FlattenLayer,exception); if (flatten_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=flatten_image; break; } if (LocaleCompare("fx",option+1) == 0) { Image *fx_image; (void) SyncImagesSettings(mogrify_info,*images,exception); fx_image=FxImage(*images,argv[i+1],exception); if (fx_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=fx_image; break; } break; } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { Image *hald_image, *image; (void) SyncImagesSettings(mogrify_info,*images,exception); image=RemoveFirstImageFromList(images); hald_image=RemoveFirstImageFromList(images); if (hald_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); image=DestroyImage(image); status=MagickFalse; break; } (void) HaldClutImage(image,hald_image,exception); hald_image=DestroyImage(hald_image); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=image; break; } break; } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *fourier_image, *magnitude_image, *phase_image; /* Implements the inverse fourier discrete Fourier transform (DFT). */ (void) SyncImagesSettings(mogrify_info,*images,exception); magnitude_image=RemoveFirstImageFromList(images); phase_image=RemoveFirstImageFromList(images); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); magnitude_image=DestroyImage(magnitude_image); status=MagickFalse; break; } fourier_image=InverseFourierTransformImage(magnitude_image, phase_image,*option == '-' ? 
MagickTrue : MagickFalse,exception); magnitude_image=DestroyImage(magnitude_image); phase_image=DestroyImage(phase_image); if (fourier_image == (Image *) NULL) break; if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("insert",option+1) == 0) { Image *p, *q; index=0; if (*option != '+') index=(ssize_t) StringToLong(argv[i+1]); p=RemoveLastImageFromList(images); if (p == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } q=p; if (index == 0) PrependImageToList(images,q); else if (index == (ssize_t) GetImageListLength(*images)) AppendImageToList(images,q); else { q=GetImageFromList(*images,index-1); if (q == (Image *) NULL) { p=DestroyImage(p); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } InsertImageInList(&q,p); } *images=GetFirstImageInList(q); break; } if (LocaleCompare("interpolate",option+1) == 0) { interpolate_method=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,argv[i+1]); break; } break; } case 'l': { if (LocaleCompare("layers",option+1) == 0) { Image *layers; LayerMethod method; (void) SyncImagesSettings(mogrify_info,*images,exception); layers=(Image *) NULL; method=(LayerMethod) ParseCommandOption(MagickLayerOptions, MagickFalse,argv[i+1]); switch (method) { case CoalesceLayer: { layers=CoalesceImages(*images,exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { layers=CompareImagesLayers(*images,method,exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { layers=MergeImageLayers(*images,method,exception); break; } case DisposeLayer: { layers=DisposeImages(*images,exception); break; } case OptimizeImageLayer: { layers=OptimizeImageLayers(*images,exception); break; } case OptimizePlusLayer: { layers=OptimizePlusImageLayers(*images,exception); break; } case OptimizeTransLayer: { OptimizeImageTransparency(*images,exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(images,exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(images,exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. */ layers=CoalesceImages(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=layers; layers=OptimizeImageLayers(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=layers; layers=(Image *) NULL; OptimizeImageTransparency(*images,exception); (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } case CompositeLayer: { CompositeOperator compose; Image *source; RectangleInfo geometry; /* Split image sequence at the first 'NULL:' image. */ source=(*images); while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. 
*/ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); status=MagickFalse; break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(*images,&geometry); (void) ParseAbsoluteGeometry((*images)->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry((*images)->page.width != 0 ? (*images)->page.width : (*images)->columns, (*images)->page.height != 0 ? (*images)->page.height : (*images)->rows,(*images)->gravity,&geometry); compose=OverCompositeOp; option=GetImageOption(mogrify_info,"compose"); if (option != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,option); CompositeLayers(*images,compose,source,geometry.x,geometry.y, exception); source=DestroyImageList(source); break; } } if (layers == (Image *) NULL) break; *images=DestroyImageList(*images); *images=layers; break; } break; } case 'm': { if (LocaleCompare("map",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images,exception); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL, exception); break; } i++; break; } if (LocaleCompare("maximum",option+1) == 0) { Image *maximum_image; /* Maximum image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images,exception); maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception); if (maximum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=maximum_image; break; } if (LocaleCompare("minimum",option+1) == 0) { Image *minimum_image; /* Minimum image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images,exception); minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception); if (minimum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=minimum_image; break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; (void) SyncImagesSettings(mogrify_info,*images,exception); morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]), exception); if (morph_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { Image *mosaic_image; (void) SyncImagesSettings(mogrify_info,*images,exception); mosaic_image=MergeImageLayers(*images,MosaicLayer,exception); if (mosaic_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=mosaic_image; break; } break; } case 'p': { if (LocaleCompare("poly",option+1) == 0) { char *args, token[MagickPathExtent]; const char *p; double *arguments; Image *polynomial_image; register ssize_t x; size_t number_arguments; /* Polynomial image. 
*/ (void) SyncImageSettings(mogrify_info,*images,exception); args=InterpretImageProperties(mogrify_info,*images,argv[i+1], exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*images)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); polynomial_image=PolynomialImage(*images,number_arguments >> 1, arguments,exception); arguments=(double *) RelinquishMagickMemory(arguments); if (polynomial_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=polynomial_image; } if (LocaleCompare("print",option+1) == 0) { char *string; (void) SyncImagesSettings(mogrify_info,*images,exception); string=InterpretImageProperties(mogrify_info,*images,argv[i+1], exception); if (string == (char *) NULL) break; (void) FormatLocaleFile(stdout,"%s",string); string=DestroyString(string); } if (LocaleCompare("process",option+1) == 0) { char **arguments; int j, number_arguments; (void) SyncImagesSettings(mogrify_info,*images,exception); arguments=StringToArgv(argv[i+1],&number_arguments); if (arguments == (char **) NULL) break; if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL)) { char breaker, quote, *token; const char *argument; int next, token_status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg". */ length=strlen(argv[i+1]); token=(char *) NULL; if (~length >= (MagickPathExtent-1)) token=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; argument=argv[i+1]; token_info=AcquireTokenInfo(); token_status=Tokenizer(token_info,0,token,length,argument,"", "=","\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (token_status == 0) { const char *arg; arg=(&(argument[next])); (void) InvokeDynamicImageFilter(token,&(*images),1,&arg, exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&(*images), number_arguments-2,(const char **) arguments+2,exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } break; } case 'r': { if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(images); break; } break; } case 's': { if (LocaleCompare("smush",option+1) == 0) { Image *smush_image; ssize_t offset; (void) SyncImagesSettings(mogrify_info,*images,exception); offset=(ssize_t) StringToLong(argv[i+1]); smush_image=SmushImages(*images,*option == '-' ? 
MagickTrue : MagickFalse,offset,exception); if (smush_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=smush_image; break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *u, *v; ssize_t swap_index; index=(-1); swap_index=(-2); if (*option != '+') { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(argv[i+1],&geometry_info); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(*images,index); q=GetImageFromList(*images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",(*images)->filename); status=MagickFalse; break; } if (p == q) break; u=CloneImage(p,0,0,MagickTrue,exception); if (u == (Image *) NULL) break; v=CloneImage(q,0,0,MagickTrue,exception); if (v == (Image *) NULL) { u=DestroyImage(u); break; } ReplaceImageInList(&p,v); ReplaceImageInList(&q,u); *images=GetFirstImageInList(q); break; } break; } case 'w': { if (LocaleCompare("write",option+1) == 0) { char key[MagickPathExtent]; Image *write_images; ImageInfo *write_info; (void) SyncImagesSettings(mogrify_info,*images,exception); (void) FormatLocaleString(key,MagickPathExtent,"cache:%s", argv[i+1]); (void) DeleteImageRegistry(key); write_images=(*images); if (*option == '+') write_images=CloneImageList(*images,exception); write_info=CloneImageInfo(mogrify_info); status&=WriteImages(write_info,write_images,argv[i+1],exception); write_info=DestroyImageInfo(write_info); if (*option == '+') write_images=DestroyImageList(write_images); break; } break; } default: break; } i+=count; } quantize_info=DestroyQuantizeInfo(quantize_info); mogrify_info=DestroyImageInfo(mogrify_info); status&=MogrifyImageInfo(image_info,argc,argv,exception); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImages() applies image processing options to a sequence of images as % prescribed by command line options. % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImages(ImageInfo *image_info, % const MagickBooleanType post,const int argc,const char **argv, % Image **images,Exceptioninfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o post: If true, post process image list operators otherwise pre-process. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to a pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. 
% */ WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info, const MagickBooleanType post,const int argc,const char **argv, Image **images,ExceptionInfo *exception) { #define MogrifyImageTag "Mogrify/Image" MagickStatusType status; MagickBooleanType proceed; size_t n; register ssize_t i; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (images == (Image **) NULL) return(MogrifyImage(image_info,argc,argv,images,exception)); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); (void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL, (void *) NULL); status=MagickTrue; #if 0 (void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc, post?"post":"pre"); #endif /* Pre-process multi-image sequence operators */ if (post == MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); /* For each image, process simple single image operators */ i=0; n=GetImageListLength(*images); for ( ; ; ) { #if 0 (void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif status&=MogrifyImage(image_info,argc,argv,images,exception); proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i, n); if (proceed == MagickFalse) break; if ( (*images)->next == (Image *) NULL ) break; *images=(*images)->next; i++; } assert( *images != (Image *) NULL ); #if 0 (void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif /* Post-process, multi-image sequence operators */ *images=GetFirstImageInList(*images); if (post != MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); return(status != 0 ? MagickTrue : MagickFalse); }
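/*
  Editor's note: the sketch below is not part of mogrify.c.  It is a minimal,
  hypothetical example of driving the MogrifyImages() entry point defined
  above from a stand-alone MagickWand client: read an image list, apply two
  list operators ("-coalesce", "-reverse"), and write the result.  The file
  names, the option vector, and the assumption that MagickWand.h exposes the
  mogrify prototypes are illustrative only; the code is guarded with #if 0 so
  it does not affect compilation of this translation unit.
*/
#if 0
#include <MagickWand/MagickWand.h>

int main(void)
{
  const char
    *options[] = { "-coalesce", "-reverse" };  /* list operators only */

  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  MagickWandGenesis();
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"animation.gif",
    MagickPathExtent);
  images=ReadImage(image_info,exception);
  if (images == (Image *) NULL)
    {
      exception=DestroyExceptionInfo(exception);
      image_info=DestroyImageInfo(image_info);
      MagickWandTerminus();
      return(1);
    }
  /*
    post=MagickTrue defers the list operators until after the per-image pass,
    matching the second MogrifyImageList() call at the end of MogrifyImages().
  */
  status=MogrifyImages(image_info,MagickTrue,2,options,&images,exception);
  if (status != MagickFalse)
    (void) WriteImages(image_info,images,"result.gif",exception);
  images=DestroyImageList(images);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickWandTerminus();
  return(status != MagickFalse ? 0 : 1);
}
#endif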
./CrossVul/dataset_final_sorted/CWE-399/c/good_946_0
crossvul-cpp_data_good_5625_0
/* * linux/fs/ext4/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * Directory entry file type support and forward compatibility hooks * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 * Hash Tree Directory indexing (c) * Daniel Phillips, 2001 * Hash Tree Directory indexing porting * Christopher Li, 2002 * Hash Tree Directory indexing cleanup * Theodore Ts'o, 2002 */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/jbd2.h> #include <linux/time.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/bio.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * define how far ahead to read directories while searching them. */ #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b)) static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block, int *err) { struct buffer_head *bh; if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && ((inode->i_size >> 10) >= EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) { *err = -ENOSPC; return NULL; } *block = inode->i_size >> inode->i_sb->s_blocksize_bits; bh = ext4_bread(handle, inode, *block, 1, err); if (bh) { inode->i_size += inode->i_sb->s_blocksize; EXT4_I(inode)->i_disksize = inode->i_size; *err = ext4_journal_get_write_access(handle, bh); if (*err) { brelse(bh); bh = NULL; } } if (!bh && !(*err)) { *err = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return bh; } #ifndef assert #define assert(test) J_ASSERT(test) #endif #ifdef DX_DEBUG #define dxtrace(command) command #else #define dxtrace(command) #endif struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct dx_entry { __le32 hash; __le32 block; }; /* * dx_root_info is laid out so that if it should somehow get overlaid by a * dirent the two low bits of the hash version will be zero. Therefore, the * hash version mod 4 should never be 0. Sincerely, the paranoia department. */ struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; /* 8 */ u8 indirect_levels; u8 unused_flags; } info; struct dx_entry entries[0]; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[0]; }; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; /* * This goes at the end of each htree block. 
*/ struct dx_tail { u32 dt_reserved; __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */ }; static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); static inline unsigned dx_get_hash(struct dx_entry *entry); static void dx_set_hash(struct dx_entry *entry, unsigned value); static unsigned dx_get_count(struct dx_entry *entries); static unsigned dx_get_limit(struct dx_entry *entries); static void dx_set_count(struct dx_entry *entries, unsigned value); static void dx_set_limit(struct dx_entry *entries, unsigned value); static unsigned dx_root_limit(struct inode *dir, unsigned infosize); static unsigned dx_node_limit(struct inode *dir); static struct dx_frame *dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame, int *err); static void dx_release(struct dx_frame *frames); static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry map[]); static void dx_sort_map(struct dx_map_entry *map, unsigned count); static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, struct dx_map_entry *offsets, int count, unsigned blocksize); static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize); static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block); static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *err); static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode); /* checksumming functions */ void initialize_dirent_tail(struct ext4_dir_entry_tail *t, unsigned int blocksize) { memset(t, 0, sizeof(struct ext4_dir_entry_tail)); t->det_rec_len = ext4_rec_len_to_disk( sizeof(struct ext4_dir_entry_tail), blocksize); t->det_reserved_ft = EXT4_FT_DIR_CSUM; } /* Walk through a dirent block to find a checksum "dirent" at the tail */ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct ext4_dir_entry *de) { struct ext4_dir_entry_tail *t; #ifdef PARANOID struct ext4_dir_entry *d, *top; d = de; top = (struct ext4_dir_entry *)(((void *)de) + (EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct ext4_dir_entry_tail))); while (d < top && d->rec_len) d = (struct ext4_dir_entry *)(((void *)d) + le16_to_cpu(d->rec_len)); if (d != top) return NULL; t = (struct ext4_dir_entry_tail *)d; #else t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb)); #endif if (t->det_reserved_zero1 || le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; return t; } static __le32 ext4_dirent_csum(struct inode *inode, struct ext4_dir_entry *dirent, int size) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); return cpu_to_le32(csum); } static void warn_no_space_for_csum(struct inode *inode) { ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for " "checksum. 
Please run e2fsck -D.", inode->i_ino); } int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return 0; } if (t->det_checksum != ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent)) return 0; return 1; } static void ext4_dirent_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return; } t->det_checksum = ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent); } int ext4_handle_dirty_dirent_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dirent, int *offset) { struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) count_offset = 8; else if (le16_to_cpu(dirent->rec_len) == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); if (le16_to_cpu(dp->rec_len) != EXT4_BLOCK_SIZE(inode->i_sb) - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || root->info_length != sizeof(struct dx_root_info)) return NULL; count_offset = 32; } else return NULL; if (offset) *offset = count_offset; return (struct dx_countlimit *)(((void *)dirent) + count_offset); } static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, int count_offset, int count, struct dx_tail *t) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum, old_csum; int size; size = count_offset + (count * sizeof(struct dx_entry)); old_csum = t->dt_checksum; t->dt_checksum = 0; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); t->dt_checksum = old_csum; return cpu_to_le32(csum); } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); return 1; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return 1; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset, count, t)) return 0; return 1; } static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? 
Run e2fsck -D."); return; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t); } static inline int ext4_handle_dirty_dx_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } /* * p is at least 6 bytes before the end of page */ static inline struct ext4_dir_entry_2 * ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize) { return (struct ext4_dir_entry_2 *)((char *)p + ext4_rec_len_from_disk(p->rec_len, blocksize)); } /* * Future: use high four bits of block for coalesce-on-delete flags * Mask them off for now. */ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry) { return le32_to_cpu(entry->block) & 0x00ffffff; } static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value) { entry->block = cpu_to_le32(value); } static inline unsigned dx_get_hash(struct dx_entry *entry) { return le32_to_cpu(entry->hash); } static inline void dx_set_hash(struct dx_entry *entry, unsigned value) { entry->hash = cpu_to_le32(value); } static inline unsigned dx_get_count(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->count); } static inline unsigned dx_get_limit(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->limit); } static inline void dx_set_count(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); } static inline void dx_set_limit(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); } static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - EXT4_DIR_REC_LEN(2) - infosize; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } static inline unsigned dx_node_limit(struct inode *dir) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } /* * Debug */ #ifdef DX_DEBUG static void dx_show_index(char * label, struct dx_entry *entries) { int i, n = dx_get_count (entries); printk(KERN_DEBUG "%s index ", label); for (i = 0; i < n; i++) { printk("%x->%lu ", i ? 
dx_get_hash(entries + i) : 0, (unsigned long)dx_get_block(entries + i)); } printk("\n"); } struct stats { unsigned names; unsigned space; unsigned bcount; }; static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; printk("names: "); while ((char *) de < base + size) { if (de->inode) { if (show_names) { int len = de->name_len; char *name = de->name; while (len--) printk("%c", *name++); ext4fs_dirhash(de->name, de->name_len, &h); printk(":%x.%u ", h.hash, (unsigned) ((char *) de - base)); } space += EXT4_DIR_REC_LEN(de->name_len); names++; } de = ext4_next_entry(de, size); } printk("(%i)\n", names); return (struct stats) { names, space, 1 }; } struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count = dx_get_count(entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; int err; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { ext4_lblk_t block = dx_get_block(entries); ext4_lblk_t hash = i ? dx_get_hash(entries): 0; u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; brelse(bh); } if (bcount) printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", levels ? "" : " ", names, space/bcount, (space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; } #endif /* DX_DEBUG */ /* * Probe for a directory leaf block to search. * * dx_probe can return ERR_BAD_DX_DIR, which means there was a format * error in the directory index, and the caller should fall back to * searching the directory normally. The callers of dx_probe **MUST** * check for this error code, and make sure it never gets reflected * back to userspace. 
*/ static struct dx_frame * dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct buffer_head *bh; struct dx_frame *frame = frame_in; u32 hash; frame->bh = NULL; if (!(bh = ext4_bread(NULL, dir, 0, 0, err))) { if (*err == 0) *err = ERR_BAD_DX_DIR; goto fail; } root = (struct dx_root *) bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", root->info.hash_version); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (d_name) ext4fs_dirhash(d_name->name, d_name->len, hinfo); hash = hinfo->hash; if (root->info.unused_flags & 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", root->info.unused_flags); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if ((indirect = root->info.indirect_levels) > 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", root->info.indirect_levels); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Root failed checksum"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } set_buffer_verified(bh); entries = (struct dx_entry *) (((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning(dir->i_sb, "dx entry: limit != root limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } dxtrace(printk("Look up %x", hash)); while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning(dir->i_sb, "dx entry: no count or count > limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p)/2; dxtrace(printk(".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } if (0) // linear search cross check { unsigned n = count - 1; at = entries; while (n--) { dxtrace(printk(",")); if (dx_get_hash(++at) > hash) { at--; break; } } assert (at == p - 1); } at = p - 1; dxtrace(printk(" %x->%u\n", at == entries? 
0: dx_get_hash(at), dx_get_block(at))); frame->bh = bh; frame->entries = entries; frame->at = at; if (!indirect--) return frame; if (!(bh = ext4_bread(NULL, dir, dx_get_block(at), 0, err))) { if (!(*err)) *err = ERR_BAD_DX_DIR; goto fail2; } at = entries = ((struct dx_node *) bh->b_data)->entries; if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Node failed checksum"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } set_buffer_verified(bh); if (dx_get_limit(entries) != dx_node_limit (dir)) { ext4_warning(dir->i_sb, "dx entry: limit != node limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } frame++; frame->bh = NULL; } fail2: while (frame >= frame_in) { brelse(frame->bh); frame--; } fail: if (*err == ERR_BAD_DX_DIR) ext4_warning(dir->i_sb, "Corrupt dir inode %lu, running e2fsck is " "recommended.", dir->i_ino); return NULL; } static void dx_release (struct dx_frame *frames) { if (frames[0].bh == NULL) return; if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels) brelse(frames[1].bh); brelse(frames[0].bh); } /* * This function increments the frame pointer to search the next leaf * block, and reads in the necessary intervening nodes if the search * should be necessary. Whether or not the search is necessary is * controlled by the hash parameter. If the hash value is even, then * the search is only continued if the next block starts with that * hash value. This is used if we are searching for a specific file. * * If the hash value is HASH_NB_ALWAYS, then always go to the next block. * * This function returns 1 if the caller should continue to search, * or 0 if it should not. If there is an error reading one of the * index blocks, it will a negative error code. * * If start_hash is non-null, it will be filled in with the starting * hash of the next page. */ static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash) { struct dx_frame *p; struct buffer_head *bh; int err, num_frames = 0; __u32 bhash; p = frame; /* * Find the next leaf page by incrementing the frame pointer. * If we run out of entries in the interior node, loop around and * increment pointer in the parent node. When we break out of * this loop, num_frames indicates the number of interior * nodes need to be read. */ while (1) { if (++(p->at) < p->entries + dx_get_count(p->entries)) break; if (p == frames) return 0; num_frames++; p--; } /* * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the * desired contiuation hash. If it doesn't, return since * there's no point to read in the successive index pages. 
*/ bhash = dx_get_hash(p->at); if (start_hash) *start_hash = bhash; if ((hash & 1) == 0) { if ((bhash & ~1) != hash) return 0; } /* * If the hash is HASH_NB_ALWAYS, we always go to the next * block so no check is necessary */ while (num_frames--) { if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at), 0, &err))) { if (!err) { ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); return -EIO; } return err; /* Failure */ } if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Node failed checksum"); return -EIO; } set_buffer_verified(bh); p++; brelse(p->bh); p->bh = bh; p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; } return 1; } /* * This function fills a red-black tree with information from a * directory block. It returns the number directory entries loaded * into the tree. If there is an error it is returned in err. */ static int htree_dirblock_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash) { struct buffer_head *bh; struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); if (!(bh = ext4_bread(NULL, dir, block, 0, &err))) { if (!err) { err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } return err; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) return -EIO; set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { /* On error, skip the f_pos to the next block. */ dir_file->f_pos = (dir_file->f_pos | (dir->i_sb->s_blocksize - 1)) + 1; brelse(bh); return count; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; if ((err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de)) != 0) { brelse(bh); return err; } count++; } brelse(bh); return count; } /* * This function fills a red-black tree with information from a * directory. We start scanning the directory in hash order, starting * at start_hash and start_minor_hash. * * This function returns the number of entries inserted into the tree, * or a negative error code. 
*/ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext4_dir_entry_2 *de; struct dx_frame frames[2], *frame; struct inode *dir; ext4_lblk_t block; int count = 0; int ret, err; __u32 hashval; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = dir_file->f_path.dentry->d_inode; if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir, &hinfo, frames, &err); if (!frame) return err; /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0) goto errout; count++; } while (1) { block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " "next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); } static inline int search_dirblock(struct buffer_head *bh, struct inode *dir, const struct qstr *d_name, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, d_name, offset, res_dir); } /* * Directory block splitting, compacting */ /* * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. */ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { ext4fs_dirhash(de->name, de->name_len, &h); map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; map_tail->size = le16_to_cpu(de->rec_len); count++; cond_resched(); } /* XXX: do we need to check rec_len == 0 case? 
-Chris */ de = ext4_next_entry(de, blocksize); } return count; } /* Sort map by hash value */ static void dx_sort_map (struct dx_map_entry *map, unsigned count) { struct dx_map_entry *p, *q, *top = map + count - 1; int more; /* Combsort until bubble sort doesn't suck */ while (count > 2) { count = count*10/13; if (count - 9 < 2) /* 9, 10 -> 11 */ count = 11; for (p = top, q = p - count; q >= map; p--, q--) if (p->hash < q->hash) swap(*p, *q); } /* Garden variety bubble sort */ do { more = 0; q = top; while (q-- > map) { if (q[1].hash >= q[0].hash) continue; swap(*(q+1), *q); more = 1; } } while(more); } static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) { struct dx_entry *entries = frame->entries; struct dx_entry *old = frame->at, *new = old + 1; int count = dx_get_count(entries); assert(count < dx_get_limit(entries)); assert(old < entries + count); memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_set_hash(new, hash); dx_set_block(new, block); dx_set_count(entries, count + 1); } /* * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure. * * `len <= EXT4_NAME_LEN' is guaranteed by caller. * `de != NULL' is guaranteed by caller. */ static inline int ext4_match (int len, const char * const name, struct ext4_dir_entry_2 * de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, const struct qstr *d_name, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; int de_len; const char *name = d_name->name; int namelen = d_name->len; de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ if ((char *) de + namelen <= dlimit && ext4_match (namelen, name, de)) { /* found a match - just to be sure, do a full check */ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, offset)) return -1; *res_dir = de; return 1; } /* prevent looping on a bad block */ de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); if (de_len <= 0) return -1; offset += de_len; de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); } return 0; } static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, struct ext4_dir_entry *de) { struct super_block *sb = dir->i_sb; if (!is_dx(dir)) return 0; if (block == 0) return 1; if (de->inode == 0 && ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) == sb->s_blocksize) return 1; return 0; } /* * ext4_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * The returned buffer_head has ->b_count elevated. The caller is expected * to brelse() it when appropriate. 
*/ static struct buffer_head * ext4_find_entry (struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block, b; const u8 *name = d_name->name; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; ext4_lblk_t nblocks; int i, err; int namelen; *res_dir = NULL; sb = dir->i_sb; namelen = d_name->len; if (namelen > EXT4_NAME_LEN) return NULL; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, d_name, res_dir, &has_inline_data); if (has_inline_data) { if (inlined) *inlined = 1; return ret; } } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (bh || (err != ERR_BAD_DX_DIR)) return bh; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = ext4_getblk(NULL, dir, b++, 0, &err); bh_use[ra_max] = bh; if (bh) ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ EXT4_ERROR_INODE(dir, "reading directory lblock %lu", (unsigned long) block); brelse(bh); goto next; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); goto next; } set_buffer_verified(bh); i = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
*/ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); return ret; } static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *err) { struct super_block * sb = dir->i_sb; struct dx_hash_info hinfo; struct dx_frame frames[2], *frame; struct buffer_head *bh; ext4_lblk_t block; int retval; if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err))) return NULL; do { block = dx_get_block(frame->at); if (!(bh = ext4_bread(NULL, dir, block, 0, err))) { if (!(*err)) { *err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } goto errout; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); *err = -EIO; goto errout; } set_buffer_verified(bh); retval = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (retval == 1) { /* Success! */ dx_release(frames); return bh; } brelse(bh); if (retval == -1) { *err = ERR_BAD_DX_DIR; goto errout; } /* Check to see if we should continue to search */ retval = ext4_htree_next_block(dir, hinfo.hash, frame, frames, NULL); if (retval < 0) { ext4_warning(sb, "error reading index page in directory #%lu", dir->i_ino); *err = retval; goto errout; } } while (retval == 1); *err = -ENOENT; errout: dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); dx_release (frames); return NULL; } static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; struct ext4_dir_entry_2 *de; struct buffer_head *bh; if (dentry->d_name.len > EXT4_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); inode = NULL; if (bh) { __u32 ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(dir->i_sb, ino)) { EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EIO); } if (unlikely(ino == dir->i_ino)) { EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir", dentry->d_name.len, dentry->d_name.name); return ERR_PTR(-EIO); } inode = ext4_iget(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", ino); return ERR_PTR(-EIO); } } return d_splice_alias(inode, dentry); } struct dentry *ext4_get_parent(struct dentry *child) { __u32 ino; static const struct qstr dotdot = QSTR_INIT("..", 2); struct ext4_dir_entry_2 * de; struct buffer_head *bh; bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL); if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { EXT4_ERROR_INODE(child->d_inode, "bad parent inode number: %u", ino); return ERR_PTR(-EIO); } return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino)); } #define S_SHIFT 12 static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EXT4_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EXT4_FT_DIR, [S_IFCHR >> S_SHIFT] = EXT4_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EXT4_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EXT4_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EXT4_FT_SOCK, [S_IFLNK >> S_SHIFT] = EXT4_FT_SYMLINK, }; static inline void ext4_set_de_type(struct super_block *sb, struct ext4_dir_entry_2 *de, umode_t mode) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, 
EXT4_FEATURE_INCOMPAT_FILETYPE)) de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } /* * Move count entries from end of map between two memory locations. * Returns pointer to last entry moved. */ static struct ext4_dir_entry_2 * dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, unsigned blocksize) { unsigned rec_len = 0; while (count--) { struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); rec_len = EXT4_DIR_REC_LEN(de->name_len); memcpy (to, de, rec_len); ((struct ext4_dir_entry_2 *) to)->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); de->inode = 0; map++; to += rec_len; } return (struct ext4_dir_entry_2 *) (to - rec_len); } /* * Compact each dir entry in the range to the minimal rec_len. * Returns pointer to last entry in range. */ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) { struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; unsigned rec_len = 0; prev = to = de; while ((char*)de < base + blocksize) { next = ext4_next_entry(de, blocksize); if (de->inode && de->name_len) { rec_len = EXT4_DIR_REC_LEN(de->name_len); if (de > to) memmove(to, de, rec_len); to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); prev = to; to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len); } de = next; } return prev; } /* * Split a full leaf block to make room for a new dir entry. * Allocate a new block, and move entries so that they are approx. equally full. * Returns pointer to de in block into which the new entry will be inserted. */ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct buffer_head **bh,struct dx_frame *frame, struct dx_hash_info *hinfo, int *error) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count, continued; struct buffer_head *bh2; ext4_lblk_t newblock; u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; struct ext4_dir_entry_tail *t; int csum_size = 0; int err = 0, i; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) { brelse(*bh); *bh = NULL; goto errout; } BUFFER_TRACE(*bh, "get_write_access"); err = ext4_journal_get_write_access(handle, *bh); if (err) goto journal_error; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; data2 = bh2->b_data; /* create map in the end of data2 block */ map = (struct dx_map_entry *) (data2 + blocksize); count = dx_make_map((struct ext4_dir_entry_2 *) data1, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); /* Split the existing block in the middle, size-wise */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { /* is more than half of this entry in 2nd half of the block? 
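 * If so, stop here: entries are claimed from the high-hash end of the
 * sorted map until roughly half of the block's bytes have been taken,
 * so the two resulting blocks come out approximately equally full.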
*/ if (size + map[i].size/2 > blocksize/2) break; size += map[i].size; move++; } /* map index at which we will split */ split = count - move; hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", (unsigned long)dx_get_block(frame->at), hash2, split, count-split)); /* Fancy dance to stay within two buffers */ de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); de = dx_pack_dirents(data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de2, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(data2, blocksize); initialize_dirent_tail(t, blocksize); t = EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); /* Which block gets the new entry? */ if (hinfo->hash >= hash2) { swap(*bh, bh2); de = de2; } dx_insert_block(frame, hash2 + continued, newblock); err = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; brelse(bh2); dxtrace(dx_show_index("frame", frame->entries)); return de; journal_error: brelse(*bh); brelse(bh2); *bh = NULL; ext4_std_error(dir->i_sb, err); errout: *error = err; return NULL; } int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, const char *name, int namelen, struct ext4_dir_entry_2 **dest_de) { struct ext4_dir_entry_2 *de; unsigned short reclen = EXT4_DIR_REC_LEN(namelen); int nlen, rlen; unsigned int offset = 0; char *top; de = (struct ext4_dir_entry_2 *)buf; top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, buf, buf_size, offset)) return -EIO; if (ext4_match(namelen, name, de)) return -EEXIST; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? rlen - nlen : rlen) >= reclen) break; de = (struct ext4_dir_entry_2 *)((char *)de + rlen); offset += rlen; } if ((char *) de > top) return -ENOSPC; *dest_de = de; return 0; } void ext4_insert_dentry(struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, const char *name, int namelen) { int nlen, rlen; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen); de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); de = de1; } de->file_type = EXT4_FT_UNKNOWN; de->inode = cpu_to_le32(inode->i_ino); ext4_set_de_type(inode->i_sb, de, inode->i_mode); de->name_len = namelen; memcpy(de->name, name, namelen); } /* * Add a new entry into a directory (leaf) block. If de is non-NULL, * it points to a directory entry which is guaranteed to be large * enough for new directory entry. If de is NULL, then * add_dirent_to_buf will attempt search the directory block for * space. It will return -ENOSPC if no space is available, and -EIO * and -EEXIST if directory entry already exists. 
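 *
 * Free space is located by ext4_find_dest_de() above: a slot is usable
 * when a live entry's on-disk record length (rlen) exceeds the length
 * its own name needs (nlen) by at least EXT4_DIR_REC_LEN(namelen), or
 * when the entry is unused (inode == 0) and rlen alone is big enough.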
*/ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int blocksize = dir->i_sb->s_blocksize; unsigned short reclen; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); reclen = EXT4_DIR_REC_LEN(namelen); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, name, namelen, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(inode, de, blocksize, name, namelen); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. * * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ dir->i_mtime = dir->i_ctime = ext4_current_time(dir); ext4_update_dx_flag(dir); dir->i_version++; ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; } /* * This converts a one block unindexed directory to a 3 block indexed * directory, and adds the dentry to the indexed directory. */ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct inode *inode, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct buffer_head *bh2; struct dx_root *root; struct dx_frame frames[2], *frame; struct dx_entry *entries; struct ext4_dir_entry_2 *de, *de2; struct ext4_dir_entry_tail *t; char *data1, *top; unsigned len; int retval; unsigned blocksize; struct dx_hash_info hinfo; ext4_lblk_t block; struct fake_dirent *fde; int csum_size = 0; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); retval = ext4_journal_get_write_access(handle, bh); if (retval) { ext4_std_error(dir->i_sb, retval); brelse(bh); return retval; } root = (struct dx_root *) bh->b_data; /* The 0th block becomes the root, move the dirents out */ fde = &root->dotdot; de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); if ((char *) de >= (((char *) root) + blocksize)) { EXT4_ERROR_INODE(dir, "invalid rec_len for '..'"); brelse(bh); return -EIO; } len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block, &retval); if (!(bh2)) { brelse(bh); return retval; } ext4_set_inode_flag(dir, EXT4_INODE_INDEX); data1 = bh2->b_data; memcpy (data1, de, len); de = (struct ext4_dir_entry_2 *) data1; top = data1 + len; while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) de = de2; de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); if (csum_size) { t = 
EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), blocksize); memset (&root->info, 0, sizeof(root->info)); root->info.info_length = sizeof(root->info); root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; entries = root->entries; dx_set_block(entries, 1); dx_set_count(entries, 1); dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); /* Initialize as for dx_probe */ hinfo.hash_version = root->info.hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; ext4fs_dirhash(name, namelen, &hinfo); frame = frames; frame->entries = entries; frame->at = entries; frame->bh = bh; bh = bh2; ext4_handle_dirty_dx_node(handle, dir, frame->bh); ext4_handle_dirty_dirent_node(handle, dir, bh); de = do_split(handle,dir, &bh, frame, &hinfo, &retval); if (!de) { /* * Even if the block split failed, we have to properly write * out all the changes we did so far. Otherwise we can end up * with corrupted filesystem. */ ext4_mark_inode_dirty(handle, dir); dx_release(frames); return retval; } dx_release(frames); retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); return retval; } /* * ext4_add_entry() * * adds a file entry to the specified directory, using the same * semantics as ext4_find_entry(). It returns NULL if it failed. * * NOTE!! The inode part of 'de' is left at 0 - which means you * may not sleep between calling this and putting something into * the entry, as someone else might have used it while you slept. */ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; struct super_block *sb; int retval; int dx_fallback=0; unsigned blocksize; ext4_lblk_t block, blocks; int csum_size = 0; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; blocksize = sb->s_blocksize; if (!dentry->d_name.len) return -EINVAL; if (ext4_has_inline_data(dir)) { retval = ext4_try_add_inline_entry(handle, dentry, inode); if (retval < 0) return retval; if (retval == 1) { retval = 0; return retval; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, dentry, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) return retval; ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); } blocks = dir->i_size >> sb->s_blocksize_bits; for (block = 0; block < blocks; block++) { if (!(bh = ext4_bread(handle, dir, block, 0, &retval))) { if (!retval) { retval = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return retval; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) return -EIO; set_buffer_verified(bh); retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); if (retval != -ENOSPC) { brelse(bh); return retval; } if (blocks == 1 && !dx_fallback && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) return make_indexed_dir(handle, dentry, inode, bh); brelse(bh); } bh = ext4_append(handle, dir, &block, &retval); if (!bh) return retval; de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; de->rec_len = 
ext4_rec_len_to_disk(blocksize - csum_size, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(bh->b_data, blocksize); initialize_dirent_tail(t, blocksize); } retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); return retval; } /* * Returns 0 for success, or a negative error value */ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct dx_frame frames[2], *frame; struct dx_entry *entries, *at; struct dx_hash_info hinfo; struct buffer_head *bh; struct inode *dir = dentry->d_parent->d_inode; struct super_block *sb = dir->i_sb; struct ext4_dir_entry_2 *de; int err; frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err); if (!frame) return err; entries = frame->entries; at = frame->at; if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err))) { if (!err) { err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } goto cleanup; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) goto journal_error; set_buffer_verified(bh); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) goto journal_error; err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); if (err != -ENOSPC) goto cleanup; /* Block full, should compress but for now just split */ dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", dx_get_count(entries), dx_get_limit(entries))); /* Need to split index? */ if (dx_get_count(entries) == dx_get_limit(entries)) { ext4_lblk_t newblock; unsigned icount = dx_get_count(entries); int levels = frame - frames; struct dx_entry *entries2; struct dx_node *node2; struct buffer_head *bh2; if (levels && (dx_get_count(frames->entries) == dx_get_limit(frames->entries))) { ext4_warning(sb, "Directory index full!"); err = -ENOSPC; goto cleanup; } bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize, sb->s_blocksize); BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; if (levels) { unsigned icount1 = icount/2, icount2 = icount - icount1; unsigned hash2 = dx_get_hash(entries + icount1); dxtrace(printk(KERN_DEBUG "Split index %i/%i\n", icount1, icount2)); BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ err = ext4_journal_get_write_access(handle, frames[0].bh); if (err) goto journal_error; memcpy((char *) entries2, (char *) (entries + icount1), icount2 * sizeof(struct dx_entry)); dx_set_count(entries, icount1); dx_set_count(entries2, icount2); dx_set_limit(entries2, dx_node_limit(dir)); /* Which index block gets the new entry? 
*/ if (at - entries >= icount1) { frame->at = at = at - entries - icount1 + entries2; frame->entries = entries = entries2; swap(frame->bh, bh2); } dx_insert_block(frames + 0, hash2, newblock); dxtrace(dx_show_index("node", frames[1].entries)); dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); err = ext4_handle_dirty_dx_node(handle, dir, bh2); if (err) goto journal_error; brelse (bh2); } else { dxtrace(printk(KERN_DEBUG "Creating second level index...\n")); memcpy((char *) entries2, (char *) entries, icount * sizeof(struct dx_entry)); dx_set_limit(entries2, dx_node_limit(dir)); /* Set up root */ dx_set_count(entries, 1); dx_set_block(entries + 0, newblock); ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1; /* Add new access path frame */ frame = frames + 1; frame->at = at = at - entries + entries2; frame->entries = entries = entries2; frame->bh = bh2; err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; } err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh); if (err) { ext4_std_error(inode->i_sb, err); goto cleanup; } } de = do_split(handle, dir, &bh, frame, &hinfo, &err); if (!de) goto cleanup; err = add_dirent_to_buf(handle, dentry, inode, de, bh); goto cleanup; journal_error: ext4_std_error(dir->i_sb, err); cleanup: if (bh) brelse(bh); dx_release(frames); return err; } /* * ext4_generic_delete_entry deletes a directory entry by merging it * with the previous entry */ int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, int buf_size, int csum_size) { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; int i; i = 0; pde = NULL; de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, i)) return -EIO; if (de == de_del) { if (pde) pde->rec_len = ext4_rec_len_to_disk( ext4_rec_len_from_disk(pde->rec_len, blocksize) + ext4_rec_len_from_disk(de->rec_len, blocksize), blocksize); else de->inode = 0; dir->i_version++; return 0; } i += ext4_rec_len_from_disk(de->rec_len, blocksize); pde = de; de = ext4_next_entry(de, blocksize); } return -ENOENT; } static int ext4_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh) { int err, csum_size = 0; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; err = ext4_delete_inline_entry(handle, dir, de_del, bh, &has_inline_data); if (has_inline_data) return err; } if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (unlikely(err)) goto out; err = ext4_generic_delete_entry(handle, dir, de_del, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) goto out; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (unlikely(err)) goto out; return 0; out: if (err != -ENOENT) ext4_std_error(dir->i_sb, err); return err; } /* * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2, * since this indicates that nlinks count was previously 1. 
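 *
 * In both cases i_nlink is pinned at 1, which ext4 uses to mean "link
 * count not tracked / too large for the 16-bit on-disk i_links_count";
 * the RO_COMPAT_DIR_NLINK feature flag records that the filesystem may
 * contain such directories.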
*/ static void ext4_inc_count(handle_t *handle, struct inode *inode) { inc_nlink(inode); if (is_dx(inode) && inode->i_nlink > 1) { /* limit is 16-bit i_links_count */ if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) { set_nlink(inode, 1); EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_DIR_NLINK); } } } /* * If a directory had nlink == 1, then we should let it be 1. This indicates * directory has >EXT4_LINK_MAX subdirs. */ static void ext4_dec_count(handle_t *handle, struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); } static int ext4_add_nondir(handle_t *handle, struct dentry *dentry, struct inode *inode) { int err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); unlock_new_inode(inode); d_instantiate(dentry, inode); return 0; } drop_nlink(inode); unlock_new_inode(inode); iput(inode); return err; } /* * By the time this is called, we already have created * the directory cache entry for the new file, but it * is so far negative - it has no inode. * * If the create succeeds, we fill in the inode information * with d_instantiate(). */ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { handle_t *handle; struct inode *inode; int err, retries = 0; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; struct inode *inode; int err, retries = 0; if (!new_valid_dev(rdev)) return -EINVAL; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len) { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, "."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(parent_ino); de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - (csum_size + EXT4_DIR_REC_LEN(1)), blocksize); else de->rec_len = ext4_rec_len_to_disk( EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); return 
ext4_next_entry(de, blocksize); } static int ext4_init_new_dir(handle_t *handle, struct inode *dir, struct inode *inode) { struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { err = ext4_try_create_inline_dir(handle, dir, inode); if (err < 0 && err != -ENOSPC) goto out; if (!err) goto out; } inode->i_size = EXT4_I(inode)->i_disksize = blocksize; if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) { if (!err) { err = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } goto out; } BUFFER_TRACE(dir_block, "get_write_access"); err = ext4_journal_get_write_access(handle, dir_block); if (err) goto out; de = (struct ext4_dir_entry_2 *)dir_block->b_data; ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); set_nlink(inode, 2); if (csum_size) { t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize); initialize_dirent_tail(t, blocksize); } BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) goto out; set_buffer_verified(dir_block); out: brelse(dir_block); return err; } static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; int err, retries = 0; if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, S_IFDIR | mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; err = ext4_init_new_dir(handle, dir, inode); if (err) goto out_clear_inode; err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); if (err) { out_clear_inode: clear_nlink(inode); unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; } ext4_inc_count(handle, dir); ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) goto out_clear_inode; unlock_new_inode(inode); d_instantiate(dentry, inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * routine to check that the specified directory is empty (for rmdir) */ static int empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; struct ext4_dir_entry_2 *de, *de1; struct super_block *sb; int err = 0; if (ext4_has_inline_data(inode)) { int has_inline_data = 1; err = empty_inline_dir(inode, &has_inline_data); if (has_inline_data) return err; } sb = inode->i_sb; if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { if (err) EXT4_ERROR_INODE(inode, "error %d reading directory lblock 0", err); else ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no data block", inode->i_ino); return 1; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry
*)bh->b_data)) { EXT4_ERROR_INODE(inode, "checksum error reading directory " "lblock 0"); return -EIO; } set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; de1 = ext4_next_entry(de, sb->s_blocksize); if (le32_to_cpu(de->inode) != inode->i_ino || !le32_to_cpu(de1->inode) || strcmp(".", de->name) || strcmp("..", de1->name)) { ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no `.' or `..'", inode->i_ino); brelse(bh); return 1; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); de = ext4_next_entry(de1, sb->s_blocksize); while (offset < inode->i_size) { if (!bh || (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { unsigned int lblock; err = 0; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); bh = ext4_bread(NULL, inode, lblock, 0, &err); if (!bh) { if (err) EXT4_ERROR_INODE(inode, "error %d reading directory " "lblock %u", err, lblock); else ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no data block", inode->i_ino); offset += sb->s_blocksize; continue; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(inode, "checksum error " "reading directory lblock 0"); return -EIO; } set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; } if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset)) { de = (struct ext4_dir_entry_2 *)(bh->b_data + sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } if (le32_to_cpu(de->inode)) { brelse(bh); return 0; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); } brelse(bh); return 1; } /* ext4_orphan_add() links an unlinked or truncated inode into a list of * such inodes, starting at the superblock, in case we crash before the * file is closed/deleted, or in case the inode truncate spans multiple * transactions and the last transaction is not recovered after a crash. * * At filesystem recovery time, we walk this list deleting unlinked * inodes and truncating linked inodes in ext4_orphan_cleanup(). */ int ext4_orphan_add(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; struct ext4_iloc iloc; int err = 0, rc; if (!EXT4_SB(sb)->s_journal) return 0; mutex_lock(&EXT4_SB(sb)->s_orphan_lock); if (!list_empty(&EXT4_I(inode)->i_orphan)) goto out_unlock; /* * Orphan handling is only valid for files with data blocks * being truncated, or files being unlinked. Note that we either * hold i_mutex, or the inode can not be referenced from outside, * so i_nlink should not be bumped due to race */ J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_unlock; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out_unlock; /* * Due to previous errors inode may be already a part of on-disk * orphan list. If so skip on-disk list modification. */ if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <= (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) goto mem_insert; /* Insert this inode at the head of the on-disk orphan list... 
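 * The on-disk list is singly linked: the superblock's s_last_orphan
 * holds the inode number of the first orphan and each orphan's
 * NEXT_ORPHAN() field holds the number of the next one, so the insert
 * below is just two pointer updates done under the journal.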
*/ NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); err = ext4_handle_dirty_super(handle, sb); rc = ext4_mark_iloc_dirty(handle, inode, &iloc); if (!err) err = rc; /* Only add to the head of the in-memory list if all the * previous operations succeeded. If the orphan_add is going to * fail (possibly taking the journal offline), we can't risk * leaving the inode on the orphan list: stray orphan-list * entries can cause panics at unmount time. * * This is safe: on error we're going to ignore the orphan list * anyway on the next recovery. */ mem_insert: if (!err) list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); jbd_debug(4, "orphan inode %lu will point to %d\n", inode->i_ino, NEXT_ORPHAN(inode)); out_unlock: mutex_unlock(&EXT4_SB(sb)->s_orphan_lock); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_orphan_del() removes an unlinked or truncated inode from the list * of such inodes stored on disk, because it is finally being cleaned up. */ int ext4_orphan_del(handle_t *handle, struct inode *inode) { struct list_head *prev; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi; __u32 ino_next; struct ext4_iloc iloc; int err = 0; if ((!EXT4_SB(inode->i_sb)->s_journal) && !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) return 0; mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock); if (list_empty(&ei->i_orphan)) goto out; ino_next = NEXT_ORPHAN(inode); prev = ei->i_orphan.prev; sbi = EXT4_SB(inode->i_sb); jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino); list_del_init(&ei->i_orphan); /* If we're on an error path, we may not have a valid * transaction handle with which to update the orphan list on * disk, but we still need to remove the inode from the linked * list in memory. 
*/ if (!handle) goto out; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out_err; if (prev == &sbi->s_orphan) { jbd_debug(4, "superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto out_brelse; sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); err = ext4_handle_dirty_super(handle, inode->i_sb); } else { struct ext4_iloc iloc2; struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; jbd_debug(4, "orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) goto out_brelse; NEXT_ORPHAN(i_prev) = ino_next; err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2); } if (err) goto out_brelse; NEXT_ORPHAN(inode) = 0; err = ext4_mark_iloc_dirty(handle, inode, &iloc); out_err: ext4_std_error(inode->i_sb, err); out: mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock); return err; out_brelse: brelse(iloc.bh); goto out_err; } static int ext4_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle; /* Initialize quotas before so that eventual writes go in * separate transaction */ dquot_initialize(dir); dquot_initialize(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (!bh) goto end_rmdir; if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = dentry->d_inode; retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!empty_dir(inode)) goto end_rmdir; retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_rmdir; if (!EXT4_DIR_LINK_EMPTY(inode)) ext4_warning(inode->i_sb, "empty directory has too many links (%d)", inode->i_nlink); inode->i_version++; clear_nlink(inode); /* There's no need to set i_disksize: the fact that i_nlink is * zero will ensure that the right thing happens during any * recovery. 
*/ inode->i_size = 0; ext4_orphan_add(handle, inode); inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_dec_count(handle, dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); end_rmdir: ext4_journal_stop(handle); brelse(bh); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle; trace_ext4_unlink_enter(dir, dentry); /* Initialize quotas before so that eventual writes go * in separate transaction */ dquot_initialize(dir); dquot_initialize(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (!bh) goto end_unlink; inode = dentry->d_inode; retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_unlink; if (!inode->i_nlink) { ext4_warning(inode->i_sb, "Deleting nonexistent file (%lu), %d", inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = ext4_current_time(dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); drop_nlink(inode); if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); retval = 0; end_unlink: ext4_journal_stop(handle); brelse(bh); trace_ext4_unlink_exit(dentry, retval); return retval; } static int ext4_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { handle_t *handle; struct inode *inode; int l, err, retries = 0; int credits; l = strlen(symname)+1; if (l > dir->i_sb->s_blocksize) return -ENAMETOOLONG; dquot_initialize(dir); if (l > EXT4_N_BLOCKS * 4) { /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, * group descriptor, sb, inode block, quota blocks, and * possibly selinux xattr blocks. */ credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + EXT4_XATTR_TRANS_BLOCKS; } else { /* * Fast symlink. We have to add entry to directory * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS), * allocate new inode (bitmap, group descriptor, inode block, * quota blocks, sb is already counted in previous macros). */ credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); } retry: handle = ext4_journal_start(dir, credits); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; if (l > EXT4_N_BLOCKS * 4) { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); /* * We cannot call page_symlink() with transaction started * because it calls into ext4_write_begin() which can wait * for transaction commit if we are running out of space * and thus we deadlock. So we have to stop transaction now * and restart it when symlink contents is written. * * To keep fs consistent in case of crash, we have to put inode * to orphan list in the mean time. 
*/ drop_nlink(inode); err = ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (err) goto err_drop_inode; err = __page_symlink(inode, symname, l, 1); if (err) goto err_drop_inode; /* * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified */ handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto err_drop_inode; } set_nlink(inode, 1); err = ext4_orphan_del(handle, inode); if (err) { ext4_journal_stop(handle); clear_nlink(inode); goto err_drop_inode; } } else { /* clear the extent format for fast symlink */ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); inode->i_op = &ext4_fast_symlink_inode_operations; memcpy((char *)&EXT4_I(inode)->i_data, symname, l); inode->i_size = l-1; } EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_add_nondir(handle, dentry, inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; err_drop_inode: unlock_new_inode(inode); iput(inode); return err; } static int ext4_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { handle_t *handle; struct inode *inode = old_dentry->d_inode; int err, retries = 0; if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode->i_ctime = ext4_current_time(inode); ext4_inc_count(handle, inode); ihold(inode); err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); } else { drop_nlink(inode); iput(inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * Try to find buffer head where contains the parent block. * It should be the inode block if it is inlined or the 1st block * if it is a normal dir. */ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct inode *inode, int *retval, struct ext4_dir_entry_2 **parent_de, int *inlined) { struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) { if (!*retval) { *retval = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return NULL; } *parent_de = ext4_next_entry( (struct ext4_dir_entry_2 *)bh->b_data, inode->i_sb->s_blocksize); return bh; } *inlined = 1; return ext4_get_first_inline_block(inode, parent_de, retval); } /* * Anybody can rename anything with this: the permission checks are left to the * higher-level routines. 
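 *
 * The sequence below is: look up both the old entry and any existing
 * target entry, add (or overwrite) the entry in the new directory,
 * delete the old entry, repoint a moved directory's ".." at the new
 * parent, and finally fix up link counts and orphan-list state for any
 * inode that was unlinked by the overwrite.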
*/ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { handle_t *handle; struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh, *dir_bh; struct ext4_dir_entry_2 *old_de, *new_de; int retval, force_da_alloc = 0; int inlined = 0, new_inlined = 0; struct ext4_dir_entry_2 *parent_de; dquot_initialize(old_dir); dquot_initialize(new_dir); old_bh = new_bh = dir_bh = NULL; /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) dquot_initialize(new_dentry->d_inode); handle = ext4_journal_start(old_dir, 2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) ext4_handle_sync(handle); old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. Goodbye sticky bit ;-< */ old_inode = old_dentry->d_inode; retval = -ENOENT; if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino) goto end_rename; new_inode = new_dentry->d_inode; new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, &new_inlined); if (new_bh) { if (!new_inode) { brelse(new_bh); new_bh = NULL; } } if (S_ISDIR(old_inode->i_mode)) { if (new_inode) { retval = -ENOTEMPTY; if (!empty_dir(new_inode)) goto end_rename; } retval = -EIO; dir_bh = ext4_get_first_dir_block(handle, old_inode, &retval, &parent_de, &inlined); if (!dir_bh) goto end_rename; if (!inlined && !buffer_verified(dir_bh) && !ext4_dirent_csum_verify(old_inode, (struct ext4_dir_entry *)dir_bh->b_data)) goto end_rename; set_buffer_verified(dir_bh); if (le32_to_cpu(parent_de->inode) != old_dir->i_ino) goto end_rename; retval = -EMLINK; if (!new_inode && new_dir != old_dir && EXT4_DIR_LINK_MAX(new_dir)) goto end_rename; BUFFER_TRACE(dir_bh, "get_write_access"); retval = ext4_journal_get_write_access(handle, dir_bh); if (retval) goto end_rename; } if (!new_bh) { retval = ext4_add_entry(handle, new_dentry, old_inode); if (retval) goto end_rename; } else { BUFFER_TRACE(new_bh, "get write access"); retval = ext4_journal_get_write_access(handle, new_bh); if (retval) goto end_rename; new_de->inode = cpu_to_le32(old_inode->i_ino); if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb, EXT4_FEATURE_INCOMPAT_FILETYPE)) new_de->file_type = old_de->file_type; new_dir->i_version++; new_dir->i_ctime = new_dir->i_mtime = ext4_current_time(new_dir); ext4_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); if (!new_inlined) { retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh); if (unlikely(retval)) { ext4_std_error(new_dir->i_sb, retval); goto end_rename; } } brelse(new_bh); new_bh = NULL; } /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ old_inode->i_ctime = ext4_current_time(old_inode); ext4_mark_inode_dirty(handle, old_inode); /* * ok, that's it */ if (le32_to_cpu(old_de->inode) != old_inode->i_ino || old_de->name_len != old_dentry->d_name.len || strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) || (retval = ext4_delete_entry(handle, old_dir, old_de, old_bh)) == -ENOENT) { /* old_de could have moved from under us during htree split, so * make sure that we are deleting the right entry. 
We might * also be pointing to a stale entry in the unused part of * old_bh so just checking inum and the name isn't enough. */ struct buffer_head *old_bh2; struct ext4_dir_entry_2 *old_de2; old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2, NULL); if (old_bh2) { retval = ext4_delete_entry(handle, old_dir, old_de2, old_bh2); brelse(old_bh2); } } if (retval) { ext4_warning(old_dir->i_sb, "Deleting old file (%lu), %d, error=%d", old_dir->i_ino, old_dir->i_nlink, retval); } if (new_inode) { ext4_dec_count(handle, new_inode); new_inode->i_ctime = ext4_current_time(new_inode); } old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir); ext4_update_dx_flag(old_dir); if (dir_bh) { parent_de->inode = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); if (!inlined) { if (is_dx(old_inode)) { retval = ext4_handle_dirty_dx_node(handle, old_inode, dir_bh); } else { retval = ext4_handle_dirty_dirent_node(handle, old_inode, dir_bh); } } else { retval = ext4_mark_inode_dirty(handle, old_inode); } if (retval) { ext4_std_error(old_dir->i_sb, retval); goto end_rename; } ext4_dec_count(handle, old_dir); if (new_inode) { /* checked empty_dir above, can't have another parent, * ext4_dec_count() won't work for many-linked dirs */ clear_nlink(new_inode); } else { ext4_inc_count(handle, new_dir); ext4_update_dx_flag(new_dir); ext4_mark_inode_dirty(handle, new_dir); } } ext4_mark_inode_dirty(handle, old_dir); if (new_inode) { ext4_mark_inode_dirty(handle, new_inode); if (!new_inode->i_nlink) ext4_orphan_add(handle, new_inode); if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC)) force_da_alloc = 1; } retval = 0; end_rename: brelse(dir_bh); brelse(old_bh); brelse(new_bh); ext4_journal_stop(handle); if (retval == 0 && force_da_alloc) ext4_alloc_da_blocks(old_inode); return retval; } /* * directories can handle most operations... */ const struct inode_operations ext4_dir_inode_operations = { .create = ext4_create, .lookup = ext4_lookup, .link = ext4_link, .unlink = ext4_unlink, .symlink = ext4_symlink, .mkdir = ext4_mkdir, .rmdir = ext4_rmdir, .mknod = ext4_mknod, .rename = ext4_rename, .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, .get_acl = ext4_get_acl, .fiemap = ext4_fiemap, }; const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, .get_acl = ext4_get_acl, };
./CrossVul/dataset_final_sorted/CWE-399/c/good_5625_0
crossvul-cpp_data_bad_3520_0
/* * Handle firewalling * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * Bart De Schuymer <bdschuym@pandora.be> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Lennert dedicates this file to Kerstin Wurdinger. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/if_pppox.h> #include <linux/ppp_defs.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter_arp.h> #include <linux/in_route.h> #include <linux/inetdevice.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/route.h> #include <asm/uaccess.h> #include "br_private.h" #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #define skb_origaddr(skb) (((struct bridge_skb_cb *) \ (skb->nf_bridge->data))->daddr.ipv4) #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr) #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr) #ifdef CONFIG_SYSCTL static struct ctl_table_header *brnf_sysctl_header; static int brnf_call_iptables __read_mostly = 1; static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 0; static int brnf_filter_pppoe_tagged __read_mostly = 0; #else #define brnf_call_iptables 1 #define brnf_call_ip6tables 1 #define brnf_call_arptables 1 #define brnf_filter_vlan_tagged 0 #define brnf_filter_pppoe_tagged 0 #endif static inline __be16 vlan_proto(const struct sk_buff *skb) { if (vlan_tx_tag_present(skb)) return skb->protocol; else if (skb->protocol == htons(ETH_P_8021Q)) return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; else return 0; } #define IS_VLAN_IP(skb) \ (vlan_proto(skb) == htons(ETH_P_IP) && \ brnf_filter_vlan_tagged) #define IS_VLAN_IPV6(skb) \ (vlan_proto(skb) == htons(ETH_P_IPV6) && \ brnf_filter_vlan_tagged) #define IS_VLAN_ARP(skb) \ (vlan_proto(skb) == htons(ETH_P_ARP) && \ brnf_filter_vlan_tagged) static inline __be16 pppoe_proto(const struct sk_buff *skb) { return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + sizeof(struct pppoe_hdr))); } #define IS_PPPOE_IP(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IP) && \ brnf_filter_pppoe_tagged) #define IS_PPPOE_IPV6(skb) \ (skb->protocol == htons(ETH_P_PPP_SES) && \ pppoe_proto(skb) == htons(PPP_IPV6) && \ brnf_filter_pppoe_tagged) static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) { } static struct dst_ops fake_dst_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .update_pmtu = fake_update_pmtu, }; /* * Initialize bogus route table used to keep netfilter happy. * Currently, we fill in the PMTU entry because netfilter * refragmentation needs it, and the rt_flags entry because * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
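 * The table is embedded in struct net_bridge, one per bridge device,
 * handed out by bridge_parent_rtable() and attached to skbs with
 * skb_dst_set_noref(), so no dst reference counting is needed on the
 * hot path.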
*/ void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; atomic_set(&rt->dst.__refcnt, 1); rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_metric_set(&rt->dst, RTAX_MTU, 1500); rt->dst.flags = DST_NOXFRM; rt->dst.ops = &fake_dst_ops; } static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? &port->br->fake_rtable : NULL; } static inline struct net_device *bridge_parent(const struct net_device *dev) { struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? port->br->dev : NULL; } static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) { skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); if (likely(skb->nf_bridge)) atomic_set(&(skb->nf_bridge->use), 1); return skb->nf_bridge; } static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (atomic_read(&nf_bridge->use) > 1) { struct nf_bridge_info *tmp = nf_bridge_alloc(skb); if (tmp) { memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info)); atomic_set(&tmp->use, 1); } nf_bridge_put(nf_bridge); nf_bridge = tmp; } return nf_bridge; } static inline void nf_bridge_push_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_push(skb, len); skb->network_header -= len; } static inline void nf_bridge_pull_encap_header(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull(skb, len); skb->network_header += len; } static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) { unsigned int len = nf_bridge_encap_header_len(skb); skb_pull_rcsum(skb, len); skb->network_header += len; } static inline void nf_bridge_save_header(struct sk_buff *skb) { int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); skb_copy_from_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); } static inline void nf_bridge_update_protocol(struct sk_buff *skb) { if (skb->nf_bridge->mask & BRNF_8021Q) skb->protocol = htons(ETH_P_8021Q); else if (skb->nf_bridge->mask & BRNF_PPPoE) skb->protocol = htons(ETH_P_PPP_SES); } /* When handing a packet over to the IP layer * check whether we have a skb that is in the * expected format */ static int br_parse_ip_options(struct sk_buff *skb) { struct ip_options *opt; struct iphdr *iph; struct net_device *dev = skb->dev; u32 len; iph = ip_hdr(skb); opt = &(IPCB(skb)->opt); /* Basic sanity checks */ if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto inhdr_error; len = ntohs(iph->tot_len); if (skb->len < len) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) goto inhdr_error; if (pskb_trim_rcsum(skb, len)) { IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); goto drop; } /* Zero out the CB buffer if no options present */ if (iph->ihl == 5) { memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); return 0; } opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) goto inhdr_error; /* Check correct handling of SRR option */ if (unlikely(opt->srr)) { struct in_device *in_dev = __in_dev_get_rcu(dev); if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) goto drop; if (ip_options_rcv_srr(skb)) goto drop; } return 0; inhdr_error: IP_INC_STATS_BH(dev_net(dev), 
IPSTATS_MIB_INHDRERRORS); drop: return -1; } /* Fill in the header for fragmented IP packets handled by * the IPv4 connection tracking code. */ int nf_bridge_copy_header(struct sk_buff *skb) { int err; unsigned int header_size; nf_bridge_update_protocol(skb); header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); err = skb_cow_head(skb, header_size); if (err) return err; skb_copy_to_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); __skb_push(skb, nf_bridge_encap_header_len(skb)); return 0; } /* PF_BRIDGE/PRE_ROUTING *********************************************/ /* Undo the changes made for ip6tables PREROUTING and continue the * bridge PRE_ROUTING hook. */ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Obtain the correct destination MAC address, while preserving the original * source MAC address. If we already know this address, we just copy it. If we * don't, we use the neighbour framework to find out. In both cases, we make * sure that br_handle_frame_finish() is called afterwards. */ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct dst_entry *dst; skb->dev = bridge_parent(skb->dev); if (!skb->dev) goto free_skb; dst = skb_dst(skb); if (dst->hh) { neigh_hh_bridge(dst->hh, skb); skb->dev = nf_bridge->physindev; return br_handle_frame_finish(skb); } else if (dst->neighbour) { /* the neighbour function below overwrites the complete * MAC header, so we save the Ethernet source address and * protocol number. */ skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); /* tell br_dev_xmit to continue with forwarding */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; return dst->neighbour->output(skb); } free_skb: kfree_skb(skb); return 0; } /* This requires some explaining. If DNAT has taken place, * we will need to fix up the destination Ethernet address. * * There are two cases to consider: * 1. The packet was DNAT'ed to a device in the same bridge * port group as it was received on. We can still bridge * the packet. * 2. The packet was DNAT'ed to a different device, either * a non-bridged device or another bridge port group. * The packet will need to be routed. * * The correct way of distinguishing between these two cases is to * call ip_route_input() and to look at skb->dst->dev, which is * changed to the destination device if ip_route_input() succeeds. * * Let's first consider the case that ip_route_input() succeeds: * * If the output device equals the logical bridge device the packet * came in on, we can consider this bridging. The corresponding MAC * address will be obtained in br_nf_pre_routing_finish_bridge. * Otherwise, the packet is considered to be routed and we just * change the destination MAC address so that the packet will * later be passed up to the IP stack to be routed. 
For a redirected * packet, ip_route_input() will give back the localhost as output device, * which differs from the bridge device. * * Let's now consider the case that ip_route_input() fails: * * This can be because the destination address is martian, in which case * the packet will be dropped. * If IP forwarding is disabled, ip_route_input() will fail, while * ip_route_output_key() can return success. The source * address for ip_route_output_key() is set to zero, so ip_route_output_key() * thinks we're handling a locally generated packet and won't care * if IP forwarding is enabled. If the output device equals the logical bridge * device, we proceed as if ip_route_input() succeeded. If it differs from the * logical bridge port or if ip_route_output_key() fails we drop the packet. */ static int br_nf_pre_routing_finish(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; int err; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; if (dnat_took_place(skb)) { if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a * martian destination or due to the fact that * forwarding is disabled. For most martian packets, * ip_route_output_key() will fail. It won't fail for 2 types of * martian destinations: loopback destinations and destination * 0.0.0.0. In both cases the packet will be dropped because the * destination is the loopback device and not the bridge. */ if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) goto free_skb; rt = ip_route_output(dev_net(dev), iph->daddr, 0, RT_TOS(iph->tos), 0); if (!IS_ERR(rt)) { /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. */ if (rt->dst.dev == dev) { skb_dst_set(skb, &rt->dst); goto bridged_dnat; } ip_rt_put(rt); } free_skb: kfree_skb(skb); return 0; } else { if (skb_dst(skb)->dev == dev) { bridged_dnat: skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_bridge, 1); return 0; } memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN); skb->pkt_type = PACKET_HOST; } } else { rt = bridge_parent_rtable(nf_bridge->physindev); if (!rt) { kfree_skb(skb); return 0; } skb_dst_set_noref(skb, &rt->dst); } skb->dev = nf_bridge->physindev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); return 0; } /* Some common code for IPv4/IPv6 */ static struct net_device *setup_pre_routing(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; nf_bridge->physindev = skb->dev; skb->dev = bridge_parent(skb->dev); if (skb->protocol == htons(ETH_P_8021Q)) nf_bridge->mask |= BRNF_8021Q; else if (skb->protocol == htons(ETH_P_PPP_SES)) nf_bridge->mask |= BRNF_PPPoE; return skb->dev; } /* We only check the length. 
A bridge shouldn't do any hop-by-hop stuff anyway */ static int check_hbh_len(struct sk_buff *skb) { unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); u32 pkt_len; const unsigned char *nh = skb_network_header(skb); int off = raw - nh; int len = (raw[1] + 1) << 3; if ((raw + len) - skb->data > skb_headlen(skb)) goto bad; off += 2; len -= 2; while (len > 0) { int optlen = nh[off + 1] + 2; switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; case IPV6_TLV_PADN: break; case IPV6_TLV_JUMBO: if (nh[off + 1] != 4 || (off & 3) != 2) goto bad; pkt_len = ntohl(*(__be32 *) (nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || ipv6_hdr(skb)->payload_len) goto bad; if (pkt_len > skb->len - sizeof(struct ipv6hdr)) goto bad; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto bad; nh = skb_network_header(skb); break; default: if (optlen > len) goto bad; break; } off += optlen; len -= optlen; } if (len == 0) return 0; bad: return -1; } /* Replicate the checks that IPv6 does on packet reception and pass the packet * to ip6tables, which doesn't support NAT, so things are fairly simple. */ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct ipv6hdr *hdr; u32 pkt_len; if (skb->len < sizeof(struct ipv6hdr)) return NF_DROP; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return NF_DROP; hdr = ipv6_hdr(skb); if (hdr->version != 6) return NF_DROP; pkt_len = ntohs(hdr->payload_len); if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) return NF_DROP; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) return NF_DROP; } if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; skb->protocol = htons(ETH_P_IPV6); NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish_ipv6); return NF_STOLEN; } /* Direct IPv6 traffic to br_nf_pre_routing_ipv6. * Replicate the checks that IPv4 does on packet reception. * Set skb->dev to the bridge device (i.e. parent of the * receiving device) to make netfilter happy, the REDIRECT * target in particular. Save the original destination IP * address to be able to detect DNAT afterwards. 
*/ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; __u32 len = nf_bridge_encap_header_len(skb); if (unlikely(!pskb_may_pull(skb, len))) return NF_DROP; p = br_port_get_rcu(in); if (p == NULL) return NF_DROP; br = p->br; if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) { if (!brnf_call_ip6tables && !br->nf_call_ip6tables) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } if (!brnf_call_iptables && !br->nf_call_iptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header_rcsum(skb); if (br_parse_ip_options(skb)) return NF_DROP; nf_bridge_put(skb->nf_bridge); if (!nf_bridge_alloc(skb)) return NF_DROP; if (!setup_pre_routing(skb)) return NF_DROP; store_orig_dstaddr(skb); skb->protocol = htons(ETH_P_IP); NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, br_nf_pre_routing_finish); return NF_STOLEN; } /* PF_BRIDGE/LOCAL_IN ************************************************/ /* The packet is locally destined, which requires a real * dst_entry, so detach the fake one. On the way up, the * packet would pass through PRE_ROUTING again (which already * took place when the packet entered the bridge), but we * register an IPv4 PRE_ROUTING 'sabotage' hook that will * prevent this from happening. */ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct rtable *rt = skb_rtable(skb); if (rt && rt == bridge_parent_rtable(in)) skb_dst_drop(skb); return NF_ACCEPT; } /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct sk_buff *skb) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *in; if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) { in = nf_bridge->physindev; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge_update_protocol(skb); } else { in = *((struct net_device **)(skb->cb)); } nf_bridge_push_encap_header(skb); NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); return 0; } /* This is the 'purely bridged' case. For IP, we pass the packet to * netfilter with indev and outdev set to the bridge device, * but we are still able to filter on the 'real' indev/outdev * because of the physdev module. For ARP, indev and outdev are the * bridge ports. */ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge; struct net_device *parent; u_int8_t pf; if (!skb->nf_bridge) return NF_ACCEPT; /* Need exclusive nf_bridge_info since we might have multiple * different physoutdevs. 
*/ if (!nf_bridge_unshare(skb)) return NF_DROP; parent = bridge_parent(out); if (!parent) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; nf_bridge_pull_encap_header(skb); nf_bridge = skb->nf_bridge; if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } if (br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; nf_bridge->physoutdev = skb->dev; if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent, br_nf_forward_finish); return NF_STOLEN; } static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct net_bridge_port *p; struct net_bridge *br; struct net_device **d = (struct net_device **)(skb->cb); p = br_port_get_rcu(out); if (p == NULL) return NF_ACCEPT; br = p->br; if (!brnf_call_arptables && !br->nf_call_arptables) return NF_ACCEPT; if (skb->protocol != htons(ETH_P_ARP)) { if (!IS_VLAN_ARP(skb)) return NF_ACCEPT; nf_bridge_pull_encap_header(skb); } if (arp_hdr(skb)->ar_pln != 4) { if (IS_VLAN_ARP(skb)) nf_bridge_push_encap_header(skb); return NF_ACCEPT; } *d = (struct net_device *)in; NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in, (struct net_device *)out, br_nf_forward_finish); return NF_STOLEN; } #if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { if (br_parse_ip_options(skb)) /* Drop invalid packet */ return NF_DROP; ret = ip_fragment(skb, br_dev_queue_push_xmit); } else ret = br_dev_queue_push_xmit(skb); return ret; } #else static int br_nf_dev_queue_xmit(struct sk_buff *skb) { return br_dev_queue_push_xmit(skb); } #endif /* PF_BRIDGE/POST_ROUTING ********************************************/ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct net_device *realoutdev = bridge_parent(skb->dev); u_int8_t pf; if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED)) return NF_ACCEPT; if (!realoutdev) return NF_DROP; if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb)) pf = PF_INET; else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) pf = PF_INET6; else return NF_ACCEPT; /* We assume any code from br_dev_queue_push_xmit onwards doesn't care * about the value of skb->pkt_type. */ if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->mask |= BRNF_PKT_TYPE; } nf_bridge_pull_encap_header(skb); nf_bridge_save_header(skb); if (pf == PF_INET) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, br_nf_dev_queue_xmit); return NF_STOLEN; } /* IP/SABOTAGE *****************************************************/ /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING * for the second time. 
*/ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { if (skb->nf_bridge && !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { return NF_STOP; } return NF_ACCEPT; } /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because * br_dev_queue_push_xmit is called afterwards */ static struct nf_hook_ops br_nf_ops[] __read_mostly = { { .hook = br_nf_pre_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_local_in, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_forward_ip, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF - 1, }, { .hook = br_nf_forward_arp, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_BRNF, }, { .hook = br_nf_post_routing, .owner = THIS_MODULE, .pf = PF_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_LAST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_FIRST, }, { .hook = ip_sabotage_in, .owner = THIS_MODULE, .pf = PF_INET6, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_FIRST, }, }; #ifdef CONFIG_SYSCTL static int brnf_sysctl_call_tables(ctl_table * ctl, int write, void __user * buffer, size_t * lenp, loff_t * ppos) { int ret; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (write && *(int *)(ctl->data)) *(int *)(ctl->data) = 1; return ret; } static ctl_table brnf_table[] = { { .procname = "bridge-nf-call-arptables", .data = &brnf_call_arptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-call-iptables", .data = &brnf_call_iptables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-call-ip6tables", .data = &brnf_call_ip6tables, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-vlan-tagged", .data = &brnf_filter_vlan_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { .procname = "bridge-nf-filter-pppoe-tagged", .data = &brnf_filter_pppoe_tagged, .maxlen = sizeof(int), .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, { } }; static struct ctl_path brnf_path[] = { { .procname = "net", }, { .procname = "bridge", }, { } }; #endif int __init br_netfilter_init(void) { int ret; ret = dst_entries_init(&fake_dst_ops); if (ret < 0) return ret; ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); if (ret < 0) { dst_entries_destroy(&fake_dst_ops); return ret; } #ifdef CONFIG_SYSCTL brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); dst_entries_destroy(&fake_dst_ops); return -ENOMEM; } #endif printk(KERN_NOTICE "Bridge firewalling registered\n"); return 0; } void br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL unregister_sysctl_table(brnf_sysctl_header); #endif dst_entries_destroy(&fake_dst_ops); }
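/* A minimal sketch of the nf_hook_ops registration pattern used by the
 * br_nf_ops[] array above: one hypothetical hook attached at the bridge
 * PRE_ROUTING point that accepts every frame. The names brnf_example_hook,
 * brnf_example_ops, brnf_example_register and brnf_example_unregister are
 * illustrative only and do not exist in br_netfilter; the sketch assumes
 * the headers already included by this file and the same nf_hookfn
 * signature used by the hooks above.
 */
static unsigned int brnf_example_hook(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	/* Inspect or mangle skb here. Returning NF_ACCEPT hands the frame
	 * to the next hook registered at this point, ordered by .priority. */
	return NF_ACCEPT;
}

static struct nf_hook_ops brnf_example_ops __read_mostly = {
	.hook		= brnf_example_hook,
	.owner		= THIS_MODULE,
	.pf		= PF_BRIDGE,
	.hooknum	= NF_BR_PRE_ROUTING,
	.priority	= NF_BR_PRI_FIRST,
};

/* Registration mirrors br_netfilter_init()/br_netfilter_fini(), but uses the
 * single-entry nf_register_hook()/nf_unregister_hook() helpers instead of
 * the array variants. */
static int brnf_example_register(void)
{
	return nf_register_hook(&brnf_example_ops);
}

static void brnf_example_unregister(void)
{
	nf_unregister_hook(&brnf_example_ops);
}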
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3520_0
crossvul-cpp_data_good_2289_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2002 Intel Corp. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement the state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * C. Robin <chris@hundredacre.ac.uk> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/random.h> /* for get_random_bytes */ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen); static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len); static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp); static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data); static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); /* Control chunk destructor */ static void sctp_control_release_owner(struct sk_buff *skb) { /*TODO: do memory release */ } static void sctp_control_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sk_buff *skb = chunk->skb; /* TODO: properly account for control chunks. * To do it right we'll need: * 1) endpoint if association isn't known. * 2) proper memory accounting. * * For now don't do anything for now. */ skb->sk = asoc ? asoc->base.sk : NULL; skb->destructor = sctp_control_release_owner; } /* What was the inbound interface for this chunk? 
*/ int sctp_chunk_iif(const struct sctp_chunk *chunk) { struct sctp_af *af; int iif = 0; af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); return iif; } /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 2: The ECN capable field is reserved for future use of * Explicit Congestion Notification. */ static const struct sctp_paramhdr ecap_param = { SCTP_PARAM_ECN_CAPABLE, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; static const struct sctp_paramhdr prsctp_param = { SCTP_PARAM_FWD_TSN_SUPPORT, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); } /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. Differs from sctp_init_cause in that it won't oops * if there isn't enough space in the op error chunk */ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); if (skb_tailroom(chunk->skb) < len) return -ENOSPC; chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(sctp_errhdr_t), &err); return 0; } /* 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two * endpoints. The format of the INIT chunk is shown below: * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initiate Tag | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Advertised Receiver Window Credit (a_rwnd) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Outbound Streams | Number of Inbound Streams | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initial TSN | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / Optional/Variable-Length Parameters / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * The INIT chunk contains the following parameters. Unless otherwise * noted, each parameter MUST only be included once in the INIT chunk. 
* * Fixed Parameters Status * ---------------------------------------------- * Initiate Tag Mandatory * Advertised Receiver Window Credit Mandatory * Number of Outbound Streams Mandatory * Number of Inbound Streams Mandatory * Initial TSN Mandatory * * Variable Parameters Status Type Value * ------------------------------------------------------------- * IPv4 Address (Note 1) Optional 5 * IPv6 Address (Note 1) Optional 6 * Cookie Preservative Optional 9 * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) * Host Name Address (Note 3) Optional 11 * Supported Address Types (Note 4) Optional 12 */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, gfp_t gfp, int vparam_len) { struct net *net = sock_net(asoc->base.sk); struct sctp_endpoint *ep = asoc->ep; sctp_inithdr_t init; union sctp_params addrs; size_t chunksize; struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; struct sctp_sock *sp; sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 1: The INIT chunks can contain multiple addresses that * can be IPv4 and/or IPv6 in any combination. */ retval = NULL; /* Convert the provided bind address list to raw format. */ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); init.init_tag = htonl(asoc->c.my_vtag); init.a_rwnd = htonl(asoc->rwnd); init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); init.initial_tsn = htonl(asoc->c.initial_tsn); /* How many address types are needed? */ sp = sctp_sk(asoc->base.sk); num_types = sp->pf->supported_addrs(sp, types); chunksize = sizeof(init) + addrs_len; chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); chunksize += sizeof(ecap_param); if (net->sctp.prsctp_enable) chunksize += sizeof(prsctp_param); /* ADDIP: Section 4.2.7: * An implementation supporting this extension [ADDIP] MUST list * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and * INIT-ACK parameters. */ if (net->sctp.addip_enable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); chunksize += vparam_len; /* Account for AUTH related parameters */ if (ep->auth_enable) { /* Add random parameter length*/ chunksize += sizeof(asoc->c.auth_random); /* Add HMACS parameter length if any were defined */ auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; /* Add CHUNKS parameter length */ auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } /* If we have any extensions to report, account for that */ if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host * Name address parameter. Moreover, the sender of the INIT * MUST NOT combine any other address types with the Host Name * address in the INIT. The receiver of INIT MUST ignore any * other address types if the Host Name address parameter is * present in the received INIT chunk. 
* * PLEASE DO NOT FIXME [This version does not support Host Name.] */ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize); if (!retval) goto nodata; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(init), &init); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 4: This parameter, when present, specifies all the * address types the sending endpoint can support. The absence * of this parameter indicates that the sending endpoint can * support any address type. */ sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES; sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types)); sctp_addto_chunk(retval, sizeof(sat), &sat); sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); /* Add the supported extensions parameter. Be nice and add this * first before adding the parameters for the extensions themselves */ if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (net->sctp.prsctp_enable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } /* Add SCTP-AUTH chunks to the parameter list */ if (ep->auth_enable) { sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), asoc->c.auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } nodata: kfree(addrs.v); return retval; } struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, gfp_t gfp, int unkparam_len) { sctp_inithdr_t initack; struct sctp_chunk *retval; union sctp_params addrs; struct sctp_sock *sp; int addrs_len; sctp_cookie_param_t *cookie; int cookie_len; size_t chunksize; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL, *auth_random = NULL; retval = NULL; /* Note: there may be no addresses to embed. */ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); initack.init_tag = htonl(asoc->c.my_vtag); initack.a_rwnd = htonl(asoc->rwnd); initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams); initack.initial_tsn = htonl(asoc->c.initial_tsn); /* FIXME: We really ought to build the cookie right * into the packet instead of allocating more fresh memory. */ cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len, addrs.v, addrs_len); if (!cookie) goto nomem_cookie; /* Calculate the total size of allocation, include the reserved * space for reporting unknown parameters if it is specified. */ sp = sctp_sk(asoc->base.sk); chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len; /* Tell peer that we'll do ECN only if peer advertised such cap. 
*/ if (asoc->peer.ecn_capable) chunksize += sizeof(ecap_param); if (asoc->peer.prsctp_capable) chunksize += sizeof(prsctp_param); if (asoc->peer.asconf_capable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); if (asoc->peer.auth_capable) { auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; chunksize += ntohs(auth_random->length); auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* Now allocate and fill out the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) goto nomem_chunk; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * * [INIT ACK back to where the INIT came from.] */ retval->transport = chunk->transport; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } if (asoc->peer.auth_capable) { sctp_addto_chunk(retval, ntohs(auth_random->length), auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; nomem_chunk: kfree(cookie); nomem_cookie: kfree(addrs.v); return retval; } /* 3.3.11 Cookie Echo (COOKIE ECHO) (10): * * This chunk is used only during the initialization of an association. * It is sent by the initiator of an association to its peer to complete * the initialization process. This chunk MUST precede any DATA chunk * sent within the association, but MAY be bundled with one or more DATA * chunks in the same packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 10 |Chunk Flags | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * / Cookie / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bit * * Set to zero on transmit and ignored on receipt. 
* * Length: 16 bits (unsigned integer) * * Set to the size of the chunk in bytes, including the 4 bytes of * the chunk header and the size of the Cookie. * * Cookie: variable size * * This field must contain the exact cookie received in the * State Cookie parameter from the previous INIT ACK. * * An implementation SHOULD make the cookie as small as possible * to insure interoperability. */ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; void *cookie; int cookie_len; cookie = asoc->peer.cookie; cookie_len = asoc->peer.cookie_len; /* Build a cookie echo chunk. */ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); if (!retval) goto nodata; retval->subh.cookie_hdr = sctp_addto_chunk(retval, cookie_len, cookie); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ECHO back to where the INIT ACK came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): * * This chunk is used only during the initialization of an * association. It is used to acknowledge the receipt of a COOKIE * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent * within the association, but MAY be bundled with one or more DATA * chunks or SACK chunk in the same SCTP packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 11 |Chunk Flags | Length = 4 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bits * * Set to zero on transmit and ignored on receipt. */ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ACK back to where the COOKIE ECHO came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* * Appendix A: Explicit Congestion Notification: * CWR: * * RFC 2481 details a specific bit for a sender to send in the header of * its next outbound TCP segment to indicate to its peer that it has * reduced its congestion window. This is termed the CWR bit. For * SCTP the same indication is made by including the CWR chunk. * This chunk contains one data element, i.e. the TSN number that * was sent in the ECNE chunk. This element represents the lowest * TSN number in the datagram that was originally marked with the * CE bit. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Lowest TSN Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Note: The CWR is considered a Control chunk. 
*/ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_cwrhdr_t cwr; cwr.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, sizeof(sctp_cwrhdr_t)); if (!retval) goto nodata; retval->subh.ecn_cwr_hdr = sctp_addto_chunk(retval, sizeof(cwr), &cwr); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report a reduced congestion window back to where the ECNE * came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Make an ECNE chunk. This is a congestion experienced report. */ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn) { struct sctp_chunk *retval; sctp_ecnehdr_t ecne; ecne.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, sizeof(sctp_ecnehdr_t)); if (!retval) goto nodata; retval->subh.ecne_hdr = sctp_addto_chunk(retval, sizeof(ecne), &ecne); nodata: return retval; } /* Make a DATA chunk for the given association from the provided * parameters. However, do not populate the data payload. */ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int data_len, __u8 flags, __u16 ssn) { struct sctp_chunk *retval; struct sctp_datahdr dp; int chunk_len; /* We assign the TSN as LATE as possible, not here when * creating the chunk. */ dp.tsn = 0; dp.stream = htons(sinfo->sinfo_stream); dp.ppid = sinfo->sinfo_ppid; /* Set the flags for an unordered send. */ if (sinfo->sinfo_flags & SCTP_UNORDERED) { flags |= SCTP_DATA_UNORDERED; dp.ssn = 0; } else dp.ssn = htons(ssn); chunk_len = sizeof(dp) + data_len; retval = sctp_make_data(asoc, flags, chunk_len); if (!retval) goto nodata; retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); nodata: return retval; } /* Create a selective acknowledgement (SACK) for the given * association. This reports on which TSN's we've seen to date, * including duplicates and gaps. */ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_sackhdr sack; int len; __u32 ctsn; __u16 num_gabs, num_dup_tsns; struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; struct sctp_transport *trans; memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); /* How much room is needed in the chunk? */ num_gabs = sctp_tsnmap_num_gabs(map, gabs); num_dup_tsns = sctp_tsnmap_num_dups(map); /* Initialize the SACK header. */ sack.cum_tsn_ack = htonl(ctsn); sack.a_rwnd = htonl(asoc->a_rwnd); sack.num_gap_ack_blocks = htons(num_gabs); sack.num_dup_tsns = htons(num_dup_tsns); len = sizeof(sack) + sizeof(struct sctp_gap_ack_block) * num_gabs + sizeof(__u32) * num_dup_tsns; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) 
to the same destination transport * address from which it received the DATA or control chunk to * which it is replying. This rule should also be followed if * the endpoint is bundling DATA chunks together with the * reply chunk. * * However, when acknowledging multiple DATA chunks received * in packets from different source addresses in a single * SACK, the SACK chunk may be transmitted to one of the * destination transport addresses from which the DATA or * control chunks being acknowledged were received. * * [BUG: We do not implement the following paragraph. * Perhaps we should remember the last transport we used for a * SACK and avoid that (if possible) if we have seen any * duplicates. --piggy] * * When a receiver of a duplicate DATA chunk sends a SACK to a * multi- homed endpoint it MAY be beneficial to vary the * destination address and not use the source address of the * DATA chunk. The reason being that receiving a duplicate * from a multi-homed endpoint might indicate that the return * path (as specified in the source address of the DATA chunk) * for the SACK is broken. * * [Send to the address from which we last received a DATA chunk.] */ retval->transport = asoc->peer.last_data_from; retval->subh.sack_hdr = sctp_addto_chunk(retval, sizeof(sack), &sack); /* Add the gap ack block information. */ if (num_gabs) sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, gabs); /* Add the duplicate TSN information. */ if (num_dup_tsns) { aptr->stats.idupchunks += num_dup_tsns; sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, sctp_tsnmap_get_dups(map)); } /* Once we have a sack generated, check to see what our sack * generation is, if its 0, reset the transports to 0, and reset * the association generation to 1 * * The idea is that zero is never used as a valid generation for the * association so no transport will match after a wrap event like this, * Until the next sack */ if (++aptr->peer.sack_generation == 0) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) trans->sack_generation = 0; aptr->peer.sack_generation = 1; } nodata: return retval; } /* Make a SHUTDOWN chunk. */ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_shutdownhdr_t shut; __u32 ctsn; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, sizeof(sctp_shutdownhdr_t)); if (!retval) goto nodata; retval->subh.shutdown_hdr = sctp_addto_chunk(retval, sizeof(shut), &shut); if (chunk) retval->transport = chunk->transport; nodata: return retval; } struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ACK back to where the SHUTDOWN came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } struct sctp_chunk *sctp_make_shutdown_complete( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association (vtag will be * reflected) */ flags |= asoc ? 
0 : SCTP_CHUNK_FLAG_T; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK * came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Create an ABORT. Note that we set the T bit if we have no * association, except when responding to an INIT (sctpimpguide 2.41). */ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const size_t hint) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association and 'chunk' is not * an INIT (vtag will be reflected). */ if (!asoc) { if (chunk && chunk->chunk_hdr && chunk->chunk_hdr->type == SCTP_CID_INIT) flags = 0; else flags = SCTP_CHUNK_FLAG_T; } retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Helper to create ABORT with a NO_USER_DATA error. */ struct sctp_chunk *sctp_make_abort_no_data( const struct sctp_association *asoc, const struct sctp_chunk *chunk, __u32 tsn) { struct sctp_chunk *retval; __be32 payload; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + sizeof(tsn)); if (!retval) goto no_mem; /* Put the tsn back into network byte order. */ payload = htonl(tsn); sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (chunk) retval->transport = chunk->transport; no_mem: return retval; } /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, const struct msghdr *msg, size_t paylen) { struct sctp_chunk *retval; void *payload = NULL; int err; retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ payload = kmalloc(paylen, GFP_KERNEL); if (!payload) goto err_payload; err = memcpy_fromiovec(payload, msg->msg_iov, paylen); if (err < 0) goto err_copy; } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); sctp_addto_chunk(retval, paylen, payload); if (paylen) kfree(payload); return retval; err_copy: kfree(payload); err_payload: sctp_chunk_free(retval); retval = NULL; err_chunk: return retval; } /* Append bytes to the end of a parameter. Will panic if chunk is not big * enough. */ static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) { void *target; int chunklen = ntohs(chunk->chunk_hdr->length); target = skb_put(chunk->skb, len); if (data) memcpy(target, data, len); else memset(target, 0, len); /* Adjust the chunk length field. 
*/ chunk->chunk_hdr->length = htons(chunklen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */ struct sctp_chunk *sctp_make_abort_violation( const struct sctp_association *asoc, const struct sctp_chunk *chunk, const __u8 *payload, const size_t paylen) { struct sctp_chunk *retval; struct sctp_paramhdr phdr; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen + sizeof(sctp_paramhdr_t)); if (!retval) goto end; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen + sizeof(sctp_paramhdr_t)); phdr.type = htons(chunk->chunk_hdr->type); phdr.length = chunk->chunk_hdr->length; sctp_addto_chunk(retval, paylen, payload); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); end: return retval; } struct sctp_chunk *sctp_make_violation_paramlen( const struct sctp_association *asoc, const struct sctp_chunk *chunk, struct sctp_paramhdr *param) { struct sctp_chunk *retval; static const char error[] = "The following parameter had invalid length:"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + sizeof(sctp_paramhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error) + sizeof(sctp_paramhdr_t)); sctp_addto_chunk(retval, sizeof(error), error); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); nodata: return retval; } struct sctp_chunk *sctp_make_violation_max_retrans( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; static const char error[] = "Association exceeded its max_retans count"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error)); sctp_addto_chunk(retval, sizeof(error), error); nodata: return retval; } /* Make a HEARTBEAT chunk. */ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport) { struct sctp_chunk *retval; sctp_sender_hb_info_t hbinfo; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); if (!retval) goto nodata; hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; hbinfo.hb_nonce = transport->hb_nonce; /* Cast away the 'const', as this is just telling the chunk * what transport it belongs to. */ retval->transport = (struct sctp_transport *) transport; retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo), &hbinfo); nodata: return retval; } struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const void *payload, const size_t paylen) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); if (!retval) goto nodata; retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [HBACK back to where the HEARTBEAT came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk with the specified space reserved. 
* This routine can be used for containing multiple causes in the chunk. */ static struct sctp_chunk *sctp_make_op_error_space( const struct sctp_association *asoc, const struct sctp_chunk *chunk, size_t size) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, sizeof(sctp_errhdr_t) + size); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk of a fixed size, * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) * This is a helper function to allocate an error chunk for * those invalid parameter codes in which we may not want * to report all the errors, if the incoming chunk is large */ static inline struct sctp_chunk *sctp_make_op_error_fixed( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { size_t size = asoc ? asoc->pathmtu : 0; if (!size) size = SCTP_DEFAULT_MAXSEGMENT; return sctp_make_op_error_space(asoc, chunk, size); } /* Create an Operation Error chunk. */ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, size_t paylen, size_t reserve_tail) { struct sctp_chunk *retval; retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail); if (!retval) goto nodata; sctp_init_cause(retval, cause_code, paylen + reserve_tail); sctp_addto_chunk(retval, paylen, payload); if (reserve_tail) sctp_addto_param(retval, reserve_tail, NULL); nodata: return retval; } struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_hmac *hmac_desc; struct sctp_authhdr auth_hdr; __u8 *hmac; /* Get the first hmac that the peer told us to use */ hmac_desc = sctp_auth_asoc_get_hmac(asoc); if (unlikely(!hmac_desc)) return NULL; retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); if (!retval) return NULL; auth_hdr.hmac_id = htons(hmac_desc->hmac_id); auth_hdr.shkey_id = htons(asoc->active_key_id); retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), &auth_hdr); hmac = skb_put(retval->skb, hmac_desc->hmac_len); memset(hmac, 0, hmac_desc->hmac_len); /* Adjust the chunk header to include the empty MAC */ retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); retval->chunk_end = skb_tail_pointer(retval->skb); return retval; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* Turn an skb into a chunk. * FIXME: Eventually move the structure directly inside the skb->cb[]. * * sctpimpguide-05.txt Section 2.8.2 * M1) Each time a new DATA chunk is transmitted * set the 'TSN.Missing.Report' count for that TSN to 0. The * 'TSN.Missing.Report' count will be used to determine missing chunks * and when to fast retransmit. 
* */ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, const struct sctp_association *asoc, struct sock *sk) { struct sctp_chunk *retval; retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); if (!retval) goto nodata; if (!sk) pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); INIT_LIST_HEAD(&retval->list); retval->skb = skb; retval->asoc = (struct sctp_association *)asoc; retval->singleton = 1; retval->fast_retransmit = SCTP_CAN_FRTX; /* Polish the bead hole. */ INIT_LIST_HEAD(&retval->transmitted_list); INIT_LIST_HEAD(&retval->frag_list); SCTP_DBG_OBJCNT_INC(chunk); atomic_set(&retval->refcnt, 1); nodata: return retval; } /* Set chunk->source and dest based on the IP header in chunk->skb. */ void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, union sctp_addr *dest) { memcpy(&chunk->source, src, sizeof(union sctp_addr)); memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); } /* Extract the source address from a chunk. */ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) { /* If we have a known transport, use that. */ if (chunk->transport) { return &chunk->transport->ipaddr; } else { /* Otherwise, extract it from the IP header. */ return &chunk->source; } } /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; struct sk_buff *skb; struct sock *sk; /* No need to allocate LL here, as this is only a chunk. */ skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), GFP_ATOMIC); if (!skb) goto nodata; /* Make room for the chunk header. */ chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); chunk_hdr->type = type; chunk_hdr->flags = flags; chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); sk = asoc ? asoc->base.sk : NULL; retval = sctp_chunkify(skb, asoc, sk); if (!retval) { kfree_skb(skb); goto nodata; } retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); /* Determine if the chunk needs to be authenticated */ if (sctp_auth_send_cid(type, asoc)) retval->auth = 1; return retval; nodata: return NULL; } static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen) { return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); } static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); if (chunk) sctp_control_set_owner_w(chunk); return chunk; } /* Release the memory occupied by a chunk. */ static void sctp_chunk_destroy(struct sctp_chunk *chunk) { BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); consume_skb(chunk->skb); consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); } /* Possibly, free the chunk. */ void sctp_chunk_free(struct sctp_chunk *chunk) { /* Release our reference on the message tracker. */ if (chunk->msg) sctp_datamsg_put(chunk->msg); sctp_chunk_put(chunk); } /* Grab a reference to the chunk. */ void sctp_chunk_hold(struct sctp_chunk *ch) { atomic_inc(&ch->refcnt); } /* Release a reference to the chunk. */ void sctp_chunk_put(struct sctp_chunk *ch) { if (atomic_dec_and_test(&ch->refcnt)) sctp_chunk_destroy(ch); } /* Append bytes to the end of a chunk. 
Will panic if chunk is not big * enough. */ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) { void *target; void *padding; int chunklen = ntohs(chunk->chunk_hdr->length); int padlen = WORD_ROUND(chunklen) - chunklen; padding = skb_put(chunk->skb, padlen); target = skb_put(chunk->skb, len); memset(padding, 0, padlen); memcpy(target, data, len); /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(chunklen + padlen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient * space in the chunk */ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, int len, const void *data) { if (skb_tailroom(chunk->skb) >= len) return sctp_addto_chunk(chunk, len, data); else return NULL; } /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. */ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data) { __u8 *target; int err = 0; /* Make room in chunk for data. */ target = skb_put(chunk->skb, len); /* Copy data (whole iovec) into chunk */ if ((err = memcpy_fromiovecend(target, data, off, len))) goto out; /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); out: return err; } /* Helper function to assign a TSN if needed. This assumes that both * the data_hdr and association have already been assigned. */ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) { struct sctp_datamsg *msg; struct sctp_chunk *lchunk; struct sctp_stream *stream; __u16 ssn; __u16 sid; if (chunk->has_ssn) return; /* All fragments will be on the same stream */ sid = ntohs(chunk->subh.data_hdr->stream); stream = &chunk->asoc->ssnmap->out; /* Now assign the sequence number to the entire message. * All fragments must have the same stream sequence number. */ msg = chunk->msg; list_for_each_entry(lchunk, &msg->chunks, frag_list) { if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { ssn = 0; } else { if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) ssn = sctp_ssn_next(stream, sid); else ssn = sctp_ssn_peek(stream, sid); } lchunk->subh.data_hdr->ssn = htons(ssn); lchunk->has_ssn = 1; } } /* Helper function to assign a TSN if needed. This assumes that both * the data_hdr and association have already been assigned. */ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) { if (!chunk->has_tsn) { /* This is the last possible instant to * assign a TSN. */ chunk->subh.data_hdr->tsn = htonl(sctp_association_get_next_tsn(chunk->asoc)); chunk->has_tsn = 1; } } /* Create a CLOSED association to use with an incoming packet. */ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_association *asoc; struct sk_buff *skb; sctp_scope_t scope; struct sctp_af *af; /* Create the bare association. */ scope = sctp_scope(sctp_source(chunk)); asoc = sctp_association_new(ep, ep->base.sk, scope, gfp); if (!asoc) goto nodata; asoc->temp = 1; skb = chunk->skb; /* Create an entry for the source address of the packet. */ af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version)); if (unlikely(!af)) goto fail; af->from_skb(&asoc->c.peer_addr, skb, 1); nodata: return asoc; fail: sctp_association_free(asoc); return NULL; } /* Build a cookie representing asoc. * This INCLUDES the param header needed to put the cookie in the INIT ACK. 
*/ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len) { sctp_cookie_param_t *retval; struct sctp_signed_cookie *cookie; struct scatterlist sg; int headersize, bodysize; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_paramhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = sizeof(struct sctp_cookie) + ntohs(init_chunk->chunk_hdr->length) + addrs_len; /* Pad out the cookie to a multiple to make the signature * functions simpler to write. */ if (bodysize % SCTP_COOKIE_MULTIPLE) bodysize += SCTP_COOKIE_MULTIPLE - (bodysize % SCTP_COOKIE_MULTIPLE); *cookie_len = headersize + bodysize; /* Clear this memory since we are sending this data structure * out on the network. */ retval = kzalloc(*cookie_len, GFP_ATOMIC); if (!retval) goto nodata; cookie = (struct sctp_signed_cookie *) retval->body; /* Set up the parameter header. */ retval->p.type = SCTP_PARAM_STATE_COOKIE; retval->p.length = htons(*cookie_len); /* Copy the cookie part of the association itself. */ cookie->c = asoc->c; /* Save the raw address list length in the cookie. */ cookie->c.raw_addr_list_len = addrs_len; /* Remember PR-SCTP capability. */ cookie->c.prsctp_capable = asoc->peer.prsctp_capable; /* Save adaptation indication in the cookie. */ cookie->c.adaptation_ind = asoc->peer.adaptation_ind; /* Set an expiration time for the cookie. */ cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get()); /* Copy the peer's init packet. */ memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, ntohs(init_chunk->chunk_hdr->length)); /* Copy the raw local address list of the association. */ memcpy((__u8 *)&cookie->c.peer_init[0] + ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); if (sctp_sk(ep->base.sk)->hmac) { struct hash_desc desc; /* Sign the message. */ sg_init_one(&sg, &cookie->c, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) goto free_cookie; } return retval; free_cookie: kfree(retval); nodata: *cookie_len = 0; return NULL; } /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ struct sctp_association *sctp_unpack_cookie( const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp, int *error, struct sctp_chunk **errp) { struct sctp_association *retval = NULL; struct sctp_signed_cookie *cookie; struct sctp_cookie *bear_cookie; int headersize, bodysize, fixed_size; __u8 *digest = ep->digest; struct scatterlist sg; unsigned int len; sctp_scope_t scope; struct sk_buff *skb = chunk->skb; ktime_t kt; struct hash_desc desc; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_chunkhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = ntohs(chunk->chunk_hdr->length) - headersize; fixed_size = headersize + sizeof(struct sctp_cookie); /* Verify that the chunk looks like it even has a cookie. * There must be enough room for our cookie and our peer's * INIT chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len < fixed_size + sizeof(struct sctp_chunkhdr)) goto malformed; /* Verify that the cookie has been padded out. 
*/ if (bodysize % SCTP_COOKIE_MULTIPLE) goto malformed; /* Process the cookie. */ cookie = chunk->subh.cookie_hdr; bear_cookie = &cookie->c; if (!sctp_sk(ep->base.sk)->hmac) goto no_hmac; /* Check the signature. */ sg_init_one(&sg, bear_cookie, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; memset(digest, 0x00, SCTP_SIGNATURE_SIZE); if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, digest)) { *error = -SCTP_IERROR_NOMEM; goto fail; } if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { *error = -SCTP_IERROR_BAD_SIG; goto fail; } no_hmac: /* IG Section 2.35.2: * 3) Compare the port numbers and the verification tag contained * within the COOKIE ECHO chunk to the actual port numbers and the * verification tag within the SCTP common header of the received * packet. If these values do not match the packet MUST be silently * discarded, */ if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) { *error = -SCTP_IERROR_BAD_TAG; goto fail; } if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port || ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { *error = -SCTP_IERROR_BAD_PORTS; goto fail; } /* Check to see if the cookie is stale. If there is already * an association, there is no need to check cookie's expiration * for init collision case of lost COOKIE ACK. * If skb has been timestamped, then use the stamp, otherwise * use current time. This introduces a small possibility that * that a cookie may be considered expired, but his would only slow * down the new association establishment instead of every packet. */ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) kt = skb_get_ktime(skb); else kt = ktime_get(); if (!asoc && ktime_before(bear_cookie->expiration, kt)) { /* * Section 3.3.10.3 Stale Cookie Error (3) * * Cause of error * --------------- * Stale Cookie Error: Indicates the receipt of a valid State * Cookie that has expired. */ len = ntohs(chunk->chunk_hdr->length); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); __be32 n = htonl(usecs); sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, sizeof(n)); sctp_addto_chunk(*errp, sizeof(n), &n); *error = -SCTP_IERROR_STALE_COOKIE; } else *error = -SCTP_IERROR_NOMEM; goto fail; } /* Make a new base association. */ scope = sctp_scope(sctp_source(chunk)); retval = sctp_association_new(ep, ep->base.sk, scope, gfp); if (!retval) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Set up our peer's port number. */ retval->peer.port = ntohs(chunk->sctp_hdr->source); /* Populate the association from the cookie. */ memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie, GFP_ATOMIC) < 0) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Also, add the destination address. */ if (list_empty(&retval->base.bind_addr.address_list)) { sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, SCTP_ADDR_SRC, GFP_ATOMIC); } retval->next_tsn = retval->c.initial_tsn; retval->ctsn_ack_point = retval->next_tsn - 1; retval->addip_serial = retval->c.initial_tsn; retval->adv_peer_ack_point = retval->ctsn_ack_point; retval->peer.prsctp_capable = retval->c.prsctp_capable; retval->peer.adaptation_ind = retval->c.adaptation_ind; /* The INIT stuff will be done by the side effects. */ return retval; fail: if (retval) sctp_association_free(retval); return NULL; malformed: /* Yikes! 
The packet is either corrupt or deliberately * malformed. */ *error = -SCTP_IERROR_MALFORMED; goto fail; } /******************************************************************** * 3rd Level Abstractions ********************************************************************/ struct __sctp_missing { __be32 num_missing; __be16 type; } __packed; /* * Report a missing mandatory parameter. */ static int sctp_process_missing_param(const struct sctp_association *asoc, sctp_param_t paramtype, struct sctp_chunk *chunk, struct sctp_chunk **errp) { struct __sctp_missing report; __u16 len; len = WORD_ROUND(sizeof(report)); /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { report.num_missing = htonl(1); report.type = paramtype; sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, sizeof(report)); sctp_addto_chunk(*errp, sizeof(report), &report); } /* Stop processing this chunk. */ return 0; } /* Report an Invalid Mandatory Parameter. */ static int sctp_process_inv_mandatory(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* Invalid Mandatory Parameter Error has no payload. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, 0); if (*errp) sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); /* Stop processing this chunk. */ return 0; } static int sctp_process_inv_paramlength(const struct sctp_association *asoc, struct sctp_paramhdr *param, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* This is a fatal error. Any accumulated non-fatal errors are * not reported. */ if (*errp) sctp_chunk_free(*errp); /* Create an error chunk and fill it in with our payload. */ *errp = sctp_make_violation_paramlen(asoc, chunk, param); return 0; } /* Do not attempt to handle the HOST_NAME parm. However, do * send back an indicator to the peer. */ static int sctp_process_hn_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { __u16 len = ntohs(param.p->length); /* Processing of the HOST_NAME parameter will generate an * ABORT. If we've accumulated any non-fatal errors, they * would be unrecognized parameters and we should not include * them in the ABORT. */ if (*errp) sctp_chunk_free(*errp); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len); sctp_addto_chunk(*errp, len, param.v); } /* Stop processing this chunk. */ return 0; } static int sctp_verify_ext_param(struct net *net, union sctp_params param) { __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int have_auth = 0; int have_asconf = 0; int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_AUTH: have_auth = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: have_asconf = 1; break; } } /* ADD-IP Security: The draft requires us to ABORT or ignore the * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this * only if ADD-IP is turned on and we are not backward-compatible * mode. 
*/ if (net->sctp.addip_noauth) return 1; if (net->sctp.addip_enable && !have_auth && have_asconf) return 0; return 1; } static void sctp_process_ext_param(struct sctp_association *asoc, union sctp_params param) { struct net *net = sock_net(asoc->base.sk); __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_FWD_TSN: if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable) asoc->peer.prsctp_capable = 1; break; case SCTP_CID_AUTH: /* if the peer reports AUTH, assume that he * supports AUTH. */ if (asoc->ep->auth_enable) asoc->peer.auth_capable = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: if (net->sctp.addip_enable) asoc->peer.asconf_capable = 1; break; default: break; } } } /* RFC 3.2.1 & the Implementers Guide 2.2. * * The Parameter Types are encoded such that the * highest-order two bits specify the action that must be * taken if the processing endpoint does not recognize the * Parameter Type. * * 00 - Stop processing this parameter; do not process any further * parameters within this chunk * * 01 - Stop processing this parameter, do not process any further * parameters within this chunk, and report the unrecognized * parameter in an 'Unrecognized Parameter' ERROR chunk. * * 10 - Skip this parameter and continue processing. * * 11 - Skip this parameter and continue processing but * report the unrecognized parameter in an * 'Unrecognized Parameter' ERROR chunk. * * Return value: * SCTP_IERROR_NO_ERROR - continue with the chunk * SCTP_IERROR_ERROR - stop and report an error. * SCTP_IERROR_NOMEME - out of memory. */ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { int retval = SCTP_IERROR_NO_ERROR; switch (param.p->type & SCTP_PARAM_ACTION_MASK) { case SCTP_PARAM_ACTION_DISCARD: retval = SCTP_IERROR_ERROR; break; case SCTP_PARAM_ACTION_SKIP: break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; /* Fall through */ case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (NULL == *errp) *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, WORD_ROUND(ntohs(param.p->length)))) sctp_addto_chunk_fixed(*errp, WORD_ROUND(ntohs(param.p->length)), param.v); } else { /* If there is no memory for generating the ERROR * report as specified, an ABORT will be triggered * to the peer and the association won't be * established. */ retval = SCTP_IERROR_NOMEM; } break; default: break; } return retval; } /* Verify variable length parameters * Return values: * SCTP_IERROR_ABORT - trigger an ABORT * SCTP_IERROR_NOMEM - out of memory (abort) * SCTP_IERROR_ERROR - stop processing, trigger an ERROR * SCTP_IERROR_NO_ERROR - continue with the chunk */ static sctp_ierror_t sctp_verify_param(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, union sctp_params param, sctp_cid_t cid, struct sctp_chunk *chunk, struct sctp_chunk **err_chunk) { struct sctp_hmac_algo_param *hmacs; int retval = SCTP_IERROR_NO_ERROR; __u16 n_elt, id = 0; int i; /* FIXME - This routine is not looking at each parameter per the * chunk type, i.e., unrecognized parameters should be further * identified based on the chunk id. 
*/ switch (param.p->type) { case SCTP_PARAM_IPV4_ADDRESS: case SCTP_PARAM_IPV6_ADDRESS: case SCTP_PARAM_COOKIE_PRESERVATIVE: case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: case SCTP_PARAM_STATE_COOKIE: case SCTP_PARAM_HEARTBEAT_INFO: case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: case SCTP_PARAM_ECN_CAPABLE: case SCTP_PARAM_ADAPTATION_LAYER_IND: break; case SCTP_PARAM_SUPPORTED_EXT: if (!sctp_verify_ext_param(net, param)) return SCTP_IERROR_ABORT; break; case SCTP_PARAM_SET_PRIMARY: if (net->sctp.addip_enable) break; goto fallthrough; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ sctp_process_hn_param(asoc, param, chunk, err_chunk); retval = SCTP_IERROR_ABORT; break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) break; goto fallthrough; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Secion 6.1 * If the random number is not 32 byte long the association * MUST be aborted. The ABORT chunk SHOULD contain the error * cause 'Protocol Violation'. */ if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Section 3.2 * The CHUNKS parameter MUST be included once in the INIT or * INIT-ACK chunk if the sender wants to receive authenticated * chunks. Its maximum length is 260 bytes. */ if (260 < ntohs(param.p->length)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fallthrough; hmacs = (struct sctp_hmac_algo_param *)param.p; n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1; /* SCTP-AUTH: Section 6.1 * The HMAC algorithm based on SHA-1 MUST be supported and * included in the HMAC-ALGO parameter. */ for (i = 0; i < n_elt; i++) { id = ntohs(hmacs->hmac_ids[i]); if (id == SCTP_AUTH_HMAC_ID_SHA1) break; } if (id != SCTP_AUTH_HMAC_ID_SHA1) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; fallthrough: default: pr_debug("%s: unrecognized param:%d for chunk:%d\n", __func__, ntohs(param.p->type), cid); retval = sctp_process_unk_param(asoc, param, chunk, err_chunk); break; } return retval; } /* Verify the INIT packet before we process it. */ int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, sctp_cid_t cid, sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, struct sctp_chunk **errp) { union sctp_params param; bool has_cookie = false; int result; /* Check for missing mandatory parameters. Note: Initial TSN is * also mandatory, but is not checked here since the valid range * is 0..2**32-1. RFC4960, section 3.3.3. */ if (peer_init->init_hdr.num_outbound_streams == 0 || peer_init->init_hdr.num_inbound_streams == 0 || peer_init->init_hdr.init_tag == 0 || ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW) return sctp_process_inv_mandatory(asoc, chunk, errp); sctp_walk_params(param, peer_init, init_hdr.params) { if (param.p->type == SCTP_PARAM_STATE_COOKIE) has_cookie = true; } /* There is a possibility that a parameter length was bad and * in that case we would have stoped walking the parameters. * The current param.p would point at the bad one. * Current consensus on the mailing list is to generate a PROTOCOL * VIOLATION error. 
We build the ERROR chunk here and let the normal * error handling code build and send the packet. */ if (param.v != (void *)chunk->chunk_end) return sctp_process_inv_paramlength(asoc, param.p, chunk, errp); /* The only missing mandatory param possible today is * the state cookie for an INIT-ACK chunk. */ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE, chunk, errp); /* Verify all the variable length parameters */ sctp_walk_params(param, peer_init, init_hdr.params) { result = sctp_verify_param(net, ep, asoc, param, cid, chunk, errp); switch (result) { case SCTP_IERROR_ABORT: case SCTP_IERROR_NOMEM: return 0; case SCTP_IERROR_ERROR: return 1; case SCTP_IERROR_NO_ERROR: default: break; } } /* for (loop through all parameters) */ return 1; } /* Unpack the parameters in an INIT packet into an association. * Returns 0 on failure, else success. * FIXME: This is an association method. */ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, const union sctp_addr *peer_addr, sctp_init_chunk_t *peer_init, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; struct sctp_af *af; union sctp_addr addr; char *cookie; int src_match = 0; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. * When processing a COOKIE ECHO, we retrieve the from address * of the INIT from the cookie. */ /* This implementation defaults to making the first transport * added as the primary transport. The source address seems to * be a a better choice than any of the embedded addresses. */ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr)) src_match = 1; /* Process the initialization parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, chunk->sctp_hdr->source, 0); if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } /* source address of chunk may not match any valid address */ if (!src_match) goto clean_up; /* AUTH: After processing the parameters, make sure that we * have all the required info to potentially do authentications. */ if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; /* In a non-backward compatible mode, if the peer claims * support for ADD-IP but not AUTH, the ADD-IP spec states * that we MUST ABORT the association. Section 6. The section * also give us an option to silently ignore the packet, which * is what we'll do here. */ if (!net->sctp.addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } /* The fixed INIT headers are always in network byte * order. 
*/ asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); /* Apply the upper bounds for output streams based on peer's * number of inbound streams. */ if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } /* Copy Initiation tag from INIT to VT_peer in cookie. */ asoc->c.peer_vtag = asoc->peer.i.init_tag; /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; /* Copy cookie in case we need to resend COOKIE-ECHO. */ cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } /* Set up the TSN tracking pieces. */ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, gfp)) goto clean_up; /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * The stream sequence number in all the streams shall start * from 0 when the association is established. Also, when the * stream sequence number reaches the value 65535 the next * stream sequence number shall be set to 0. */ /* Allocate storage for the negotiated streams if it is not a temporary * association. */ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state != SCTP_ACTIVE) sctp_assoc_rm_peer(asoc, transport); } nomem: return 0; } /* Update asoc with the option described in param. * * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT * * asoc is the association to update. * param is the variable length parameter to use for update. * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. * If the current packet is an INIT we want to minimize the amount of * work we do. In particular, we should not build transport * structures for the addresses. 
*/ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_addr addr; int i; __u16 sat; int retval = 1; sctp_scope_t scope; time_t stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; struct sctp_endpoint *ep = asoc->ep; /* We maintain all INIT parameters in network byte order all the * time. This allows us to not worry about whether the parameters * came from a fresh INIT, and INIT ACK, or were stored in a cookie. */ switch (param.p->type) { case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 != asoc->base.sk->sk_family) break; goto do_addr_param; case SCTP_PARAM_IPV4_ADDRESS: /* v4 addresses are not allowed on v6-only socket */ if (ipv6_only_sock(asoc->base.sk)) break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; case SCTP_PARAM_COOKIE_PRESERVATIVE: if (!net->sctp.cookie_preserve_enable) break; stale = ntohl(param.life->lifespan_increment); /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: /* Turn off the default values first so we'll know which * ones are really set by the peer. */ asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; /* Assume that peer supports the address family * by which it sends a packet. */ if (peer_addr->sa.sa_family == AF_INET6) asoc->peer.ipv6_address = 1; else if (peer_addr->sa.sa_family == AF_INET) asoc->peer.ipv4_address = 1; /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) sat /= sizeof(__u16); for (i = 0; i < sat; ++i) { switch (param.sat->types[i]) { case SCTP_PARAM_IPV4_ADDRESS: asoc->peer.ipv4_address = 1; break; case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 == asoc->base.sk->sk_family) asoc->peer.ipv6_address = 1; break; case SCTP_PARAM_HOST_NAME_ADDRESS: asoc->peer.hostname_address = 1; break; default: /* Just ignore anything else. */ break; } } break; case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); asoc->peer.cookie = param.cookie->body; break; case SCTP_PARAM_HEARTBEAT_INFO: /* Would be odd to receive, but it causes no problems. */ break; case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: /* Rejected during verify stage. */ break; case SCTP_PARAM_ECN_CAPABLE: asoc->peer.ecn_capable = 1; break; case SCTP_PARAM_ADAPTATION_LAYER_IND: asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); break; case SCTP_PARAM_SET_PRIMARY: if (!net->sctp.addip_enable) goto fall_through; addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); if (af == NULL) break; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* if the address is invalid, we can't process it. * XXX: see spec for what to do. 
*/ if (!af->addr_valid(&addr, NULL, NULL)) break; t = sctp_assoc_lookup_paddr(asoc, &addr); if (!t) break; sctp_assoc_set_primary(asoc, t); break; case SCTP_PARAM_SUPPORTED_EXT: sctp_process_ext_param(asoc, param); break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ goto fall_through; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fall_through; /* Save peer's random parameter */ asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { retval = 0; break; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fall_through; /* Save peer's HMAC list */ asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { retval = 0; break; } /* Set the default HMAC the peer requested*/ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fall_through; asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) retval = 0; break; fall_through: default: /* Any unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be * called prior to this routine. Simply log the error * here. */ pr_debug("%s: ignoring param:%d for association:%p.\n", __func__, ntohs(param.p->type), asoc); break; } return retval; } /* Select a new verification tag. */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep) { /* I believe that this random number generator complies with RFC1750. * A tag of 0 is reserved for special cases (e.g. INIT). */ __u32 x; do { get_random_bytes(&x, sizeof(__u32)); } while (x == 0); return x; } /* Select an initial TSN to send during startup. */ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) { __u32 retval; get_random_bytes(&retval, sizeof(__u32)); return retval; } /* * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Address Parameter and other parameter will not be wrapped in this function */ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; length += addrlen; /* Create the chunk. 
*/ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; asconf.serial = htonl(asoc->addip_serial++); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); retval->param_hdr.v = sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP * 3.2.1 Add IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC001 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 3.2.2 Delete IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC002 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, union sctp_addr *laddr, struct sockaddr *addrs, int addrcnt, __be16 flags) { sctp_addip_param_t param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; void *addr_buf; struct sctp_af *af; int paramlen = sizeof(param); int addr_param_len = 0; int totallen = 0; int i; int del_pickup = 0; /* Get total length of all the address parameters. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); totallen += paramlen; totallen += addr_param_len; addr_buf += af->sockaddr_len; if (asoc->asconf_addr_del_pending && !del_pickup) { /* reuse the parameter length from the same scope one */ totallen += paramlen; totallen += addr_param_len; del_pickup = 1; pr_debug("%s: picked same-scope del_pending addr, " "totallen for all addresses is %d\n", __func__, totallen); } } /* Create an asconf chunk with the required length. */ retval = sctp_make_asconf(asoc, laddr, totallen); if (!retval) return NULL; /* Add the address parameters to the asconf chunk. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = flags; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); addr_buf += af->sockaddr_len; } if (flags == SCTP_PARAM_ADD_IP && del_pickup) { addr = asoc->asconf_addr_del_pending; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = SCTP_PARAM_DEL_IP; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); } return retval; } /* ADDIP * 3.2.4 Set Primary IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type =0xC004 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF chunk with Set Primary IP address parameter. */ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { sctp_addip_param_t param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; len += addrlen; /* Create the chunk and make asconf header. */ retval = sctp_make_asconf(asoc, addr, len); if (!retval) return NULL; param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; param.param_hdr.length = htons(len); param.crr_id = 0; sctp_addto_chunk(retval, sizeof(param), &param); sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0x80 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF_ACK chunk with enough space for the parameter responses. */ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); if (!retval) return NULL; asconf.serial = htonl(serial); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); return retval; } /* Add response parameters to an ASCONF_ACK chunk. 
*/ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, __be16 err_code, sctp_addip_param_t *asconf_param) { sctp_addip_param_t ack_param; sctp_errhdr_t err_param; int asconf_param_len = 0; int err_param_len = 0; __be16 response_type; if (SCTP_ERROR_NO_ERROR == err_code) { response_type = SCTP_PARAM_SUCCESS_REPORT; } else { response_type = SCTP_PARAM_ERR_CAUSE; err_param_len = sizeof(err_param); if (asconf_param) asconf_param_len = ntohs(asconf_param->param_hdr.length); } /* Add Success Indication or Error Cause Indication parameter. */ ack_param.param_hdr.type = response_type; ack_param.param_hdr.length = htons(sizeof(ack_param) + err_param_len + asconf_param_len); ack_param.crr_id = crr_id; sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); if (SCTP_ERROR_NO_ERROR == err_code) return; /* Add Error Cause parameter. */ err_param.cause = err_code; err_param.length = htons(err_param_len + asconf_param_len); sctp_addto_chunk(chunk, err_param_len, &err_param); /* Add the failed TLV copied from ASCONF chunk. */ if (asconf_param) sctp_addto_chunk(chunk, asconf_param_len, asconf_param); } /* Process a asconf parameter. */ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, sctp_addip_param_t *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) return SCTP_ERROR_UNKNOWN_PARAM; switch (addr_param->p.type) { case SCTP_PARAM_IPV6_ADDRESS: if (!asoc->peer.ipv6_address) return SCTP_ERROR_DNS_FAILED; break; case SCTP_PARAM_IPV4_ADDRESS: if (!asoc->peer.ipv4_address) return SCTP_ERROR_DNS_FAILED; break; default: return SCTP_ERROR_DNS_FAILED; } af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. * (note: wildcard is permitted and requires special handling so * make sure we check for that) */ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) return SCTP_ERROR_DNS_FAILED; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* Section 4.2.1: * If the address 0.0.0.0 or ::0 is provided, the source * address of the packet MUST be added. */ if (af->is_any(&addr)) memcpy(&addr, &asconf->source, sizeof(addr)); /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address * request and does not have the local resources to add this * new address to the association, it MUST return an Error * Cause TLV set to the new error code 'Operation Refused * Due to Resource Shortage'. */ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; /* Start the heartbeat timer. */ if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) sctp_transport_hold(peer); asoc->new_transport = peer; break; case SCTP_PARAM_DEL_IP: /* ADDIP 4.3 D7) If a request is received to delete the * last remaining IP address of a peer endpoint, the receiver * MUST send an Error Cause TLV with the error cause set to the * new error code 'Request to Delete Last Remaining IP Address'. 
*/ if (asoc->peer.transport_count == 1) return SCTP_ERROR_DEL_LAST_IP; /* ADDIP 4.3 D8) If a request is received to delete an IP * address which is also the source address of the IP packet * which contained the ASCONF chunk, the receiver MUST reject * this request. To reject the request the receiver MUST send * an Error Cause TLV set to the new error code 'Request to * Delete Source IP Address' */ if (sctp_cmp_addr_exact(&asconf->source, &addr)) return SCTP_ERROR_DEL_SRC_IP; /* Section 4.2.2 * If the address 0.0.0.0 or ::0 is provided, all * addresses of the peer except the source address of the * packet MUST be deleted. */ if (af->is_any(&addr)) { sctp_assoc_set_primary(asoc, asconf->transport); sctp_assoc_del_nonprimary_peers(asoc, asconf->transport); } else sctp_assoc_del_peer(asoc, &addr); break; case SCTP_PARAM_SET_PRIMARY: /* ADDIP Section 4.2.4 * If the address 0.0.0.0 or ::0 is provided, the receiver * MAY mark the source address of the packet as its * primary. */ if (af->is_any(&addr)) memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) return SCTP_ERROR_DNS_FAILED; sctp_assoc_set_primary(asoc, peer); break; } return SCTP_ERROR_NO_ERROR; } /* Verify the ASCONF packet before we process it. */ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; union sctp_params param; bool addr_param_seen = false; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(sctp_ipv4addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(sctp_ipv6addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return false; break; default: /* This is unkown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; } /* Process an incoming ASCONF chunk with the next expected serial no. and * return an ASCONF_ACK chunk to be sent in response. */ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; bool all_param_pass = true; union sctp_params param; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; struct sctp_chunk *asconf_ack; __be16 err_code; int length = 0; int chunk_len; __u32 serial; chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); hdr = (sctp_addiphdr_t *)asconf->skb->data; serial = ntohl(hdr->serial); /* Skip the addiphdr and store a pointer to address parameter. 
*/ length = sizeof(sctp_addiphdr_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); chunk_len -= length; /* Skip the address parameter and store a pointer to the first * asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; chunk_len -= length; /* create an ASCONF_ACK chunk. * Based on the definitions of parameters, we know that the size of * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF * parameters. */ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4); if (!asconf_ack) goto done; /* Process the TLVs contained within the ASCONF chunk. */ sctp_walk_params(param, addip, addip_hdr.params) { /* Skip preceeding address parameters. */ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS) continue; err_code = sctp_process_asconf_param(asoc, asconf, param.addip); /* ADDIP 4.1 A7) * If an error response is received for a TLV parameter, * all TLVs with no response before the failed TLV are * considered successful if not reported. All TLVs after * the failed response are considered unsuccessful unless * a specific success indication is present for the parameter. */ if (err_code != SCTP_ERROR_NO_ERROR) all_param_pass = false; if (!all_param_pass) sctp_add_asconf_response(asconf_ack, param.addip->crr_id, err_code, param.addip); /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add * an IP address sends an 'Out of Resource' in its response, it * MUST also fail any subsequent add or delete requests bundled * in the ASCONF. */ if (err_code == SCTP_ERROR_RSRC_LOW) goto done; } done: asoc->peer.addip_serial++; /* If we are sending a new ASCONF_ACK hold a reference to it in assoc * after freeing the reference to old asconf ack if any. */ if (asconf_ack) { sctp_chunk_hold(asconf_ack); list_add_tail(&asconf_ack->transmitted_list, &asoc->asconf_ack_list); } return asconf_ack; } /* Process a asconf parameter that is successfully acked. */ static void sctp_asconf_param_success(struct sctp_association *asoc, sctp_addip_param_t *asconf_param) { struct sctp_af *af; union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* This is always done in BH context with a socket lock * held, so the list can not change. 
*/ local_bh_disable(); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, &addr)) saddr->state = SCTP_ADDR_SRC; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; case SCTP_PARAM_DEL_IP: local_bh_disable(); sctp_del_bind_addr(bp, &addr); if (asoc->asconf_addr_del_pending != NULL && sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { kfree(asoc->asconf_addr_del_pending); asoc->asconf_addr_del_pending = NULL; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; default: break; } } /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk * for the given asconf parameter. If there is no response for this parameter, * return the error code based on the third argument 'no_err'. * ADDIP 4.1 * A7) If an error response is received for a TLV parameter, all TLVs with no * response before the failed TLV are considered successful if not reported. * All TLVs after the failed response are considered unsuccessful unless a * specific success indication is present for the parameter. */ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, sctp_addip_param_t *asconf_param, int no_err) { sctp_addip_param_t *asconf_ack_param; sctp_errhdr_t *err_param; int length; int asconf_ack_len; __be16 err_code; if (no_err) err_code = SCTP_ERROR_NO_ERROR; else err_code = SCTP_ERROR_REQ_REFUSED; asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); /* Skip the addiphdr from the asconf_ack chunk and store a pointer to * the first asconf_ack parameter. */ length = sizeof(sctp_addiphdr_t); asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + length); asconf_ack_len -= length; while (asconf_ack_len > 0) { if (asconf_ack_param->crr_id == asconf_param->crr_id) { switch (asconf_ack_param->param_hdr.type) { case SCTP_PARAM_SUCCESS_REPORT: return SCTP_ERROR_NO_ERROR; case SCTP_PARAM_ERR_CAUSE: length = sizeof(sctp_addip_param_t); err_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; if (asconf_ack_len > 0) return err_param->cause; else return SCTP_ERROR_INV_PARAM; break; default: return SCTP_ERROR_INV_PARAM; } } length = ntohs(asconf_ack_param->param_hdr.length); asconf_ack_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; } return err_code; } /* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; int no_err = 1; int retval = 0; __be16 err_code = SCTP_ERROR_NO_ERROR; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. 
*/ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. */ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_UNKNOWN_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; asconf_len -= length; } if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; sctp_transport_immediate_rtx(asoc->peer.primary_path); } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; return retval; } /* Make a FWD TSN chunk. */ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist) { struct sctp_chunk *retval = NULL; struct sctp_fwdtsn_hdr ftsn_hdr; struct sctp_fwdtsn_skip skip; size_t hint; int i; hint = (nstreams + 1) * sizeof(__u32); retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); for (i = 0; i < nstreams; i++) { skip.stream = skiplist[i].stream; skip.ssn = skiplist[i].ssn; sctp_addto_chunk(retval, sizeof(skip), &skip); } return retval; }
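/*
 * A minimal usage sketch for sctp_make_fwdtsn() above: build a FWD-TSN
 * chunk that advances the peer's cumulative TSN point and reports two
 * skipped streams.  The stream numbers and SSNs here are hypothetical
 * example values; a real caller derives them from the abandoned chunks
 * on the outqueue, and the function name sctp_example_fwdtsn is only
 * illustrative.
 */
static __maybe_unused struct sctp_chunk *
sctp_example_fwdtsn(const struct sctp_association *asoc)
{
	/* Streams whose ordered delivery is being skipped (network order). */
	struct sctp_fwdtsn_skip skiplist[2] = {
		{ .stream = htons(1), .ssn = htons(10) },
		{ .stream = htons(3), .ssn = htons(7) },
	};

	/* new_cum_tsn is passed in host byte order; the helper converts it. */
	return sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
				ARRAY_SIZE(skiplist), skiplist);
}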
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> */ /* * Changes: * Pedro Roque : Fast Retransmit/Recovery. * Two receive queues. * Retransmit queue handled by TCP. * Better retransmit timer handling. * New congestion avoidance. * Header prediction. * Variable renaming. * * Eric : Fast Retransmit. * Randy Scott : MSS option defines. * Eric Schenk : Fixes to slow start algorithm. * Eric Schenk : Yet another double ACK bug. * Eric Schenk : Delayed ACK bug fixes. * Eric Schenk : Floyd style fast retrans war avoidance. * David S. Miller : Don't allow zero congestion window. * Eric Schenk : Fix retransmitter so that it sends * next packet on ack of previous packet. * Andi Kleen : Moved open_request checking here * and process RSTs for open_requests. * Andi Kleen : Better prune_queue, and other fixes. * Andrey Savochkin: Fix RTT measurements in the presence of * timestamps. * Andrey Savochkin: Check sequence numbers correctly when * removing SACKs due to in sequence incoming * data segments. * Andi Kleen: Make sure we never ack data there is not * enough room for. Also make this condition * a fatal error if it might still happen. * Andi Kleen: Add tcp_measure_rcv_mss to make * connections with MSS<min(MTU,ann. MSS) * work without delayed acks. * Andi Kleen: Process packets with PSH set in the * fast path. * J Hadi Salim: ECN support * Andrei Gurtov, * Pasi Sarolahti, * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. * Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/kernel.h> #include <net/dst.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/ipsec.h> #include <asm/unaligned.h> #include <net/netdma.h> int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_ecn __read_mostly = 2; EXPORT_SYMBOL(sysctl_tcp_ecn); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 2; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_nometrics_save __read_mostly; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_abc __read_mostly; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. 
*/ #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) /* Adapt the MSS value used to make delayed ack decision to the * real world. */ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); const unsigned int lss = icsk->icsk_ack.last_seg_size; unsigned int len; icsk->icsk_ack.last_seg_size = 0; /* skb->len may jitter because of SACKs, even if peer * sends good full-sized frames. */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = len; } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. * * "len" is invariant segment length, including TCP header. */ len += skb->data - skb_transport_header(skb); if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows * to handle super-low mtu links fairly. */ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. */ len -= tcp_sk(sk)->tcp_header_len; icsk->icsk_ack.last_seg_size = len; if (len == lss) { icsk->icsk_ack.rcv_mss = len; return; } } if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; } } static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; if (quickacks > icsk->icsk_ack.quick) icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); } static void tcp_enter_quickack_mode(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack(sk); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. 
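 * An interactive (pingpong) session delays ACKs so they can be piggybacked
 * on response data; a bulk receiver keeps ACKing quickly until the quota
 * set by tcp_incr_quickack() is spent.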
*/ static inline int tcp_in_quickack_mode(const struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; } static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) { if (tp->ecn_flags & TCP_ECN_OK) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { if (!(tp->ecn_flags & TCP_ECN_OK)) return; switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, * and we already seen ECT on a previous segment, * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: tp->ecn_flags |= TCP_ECN_DEMAND_CWR; /* fallinto */ default: tp->ecn_flags |= TCP_ECN_SEEN; } } static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) return 1; return 0; } /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. */ static void tcp_fixup_sndbuf(struct sock *sk) { int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER); sndmem *= TCP_INIT_CWND; if (sk->sk_sndbuf < sndmem) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); } /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) * * All tcp_full_space() is split to two parts: "network" buffer, allocated * forward and advertised in receiver window (tp->rcv_wnd) and * "application buffer", required to isolate scheduling/application * latencies from network. * window_clamp is maximal advertised window. It can be less than * tcp_full_space(), in this case tcp_full_space() - window_clamp * is reserved for "application" buffer. The less window_clamp is * the smoother our behaviour from viewpoint of network, but the lower * throughput and the higher sensitivity of the connection to losses. 8) * * rcv_ssthresh is more strict window_clamp used at "slow start" * phase to predict further behaviour of this connection. * It is used for two goals: * - to enforce header prediction at sender, even when application * requires some significant "application buffer". It is check #1. * - to prevent pruning of receive queue because of misprediction * of receiver window. Check #2. * * The scheme does not work when sender sends good segments opening * window and then starts to feed us spaghetti. But it should work * in common situations. Otherwise, we have to rely on queue collapsing. */ /* Slow part of check#2. */ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! 
*/ int truesize = tcp_win_from_space(skb->truesize) >> 1; int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1; while (tp->rcv_ssthresh <= window) { if (truesize <= skb->len) return 2 * inet_csk(sk)->icsk_ack.rcv_mss; truesize >>= 1; window >>= 1; } return 0; } static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && !tcp_memory_pressure) { int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. */ if (tcp_win_from_space(skb->truesize) <= skb->len) incr = 2 * tp->advmss; else incr = __tcp_grow_window(sk, skb); if (incr) { tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; } } } /* 3. Tuning rcvbuf, when connection enters established state. */ static void tcp_fixup_rcvbuf(struct sock *sk) { u32 mss = tcp_sk(sk)->advmss; u32 icwnd = TCP_DEFAULT_INIT_RCVWND; int rcvmem; /* Limit to 10 segments if mss <= 1460, * or 14600/mss segments, with a minimum of two segments. */ if (mss > 1460) icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < mss) rcvmem += 128; rcvmem *= icwnd; if (sk->sk_rcvbuf < rcvmem) sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); } /* 4. Try to fixup all. It is made immediately after connection enters * established state. */ static void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) tcp_fixup_rcvbuf(sk); if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_fixup_sndbuf(sk); tp->rcvq_space.space = tp->rcv_wnd; maxwin = tcp_full_space(sk); if (tp->window_clamp >= maxwin) { tp->window_clamp = maxwin; if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) tp->window_clamp = max(maxwin - (maxwin >> sysctl_tcp_app_win), 4 * tp->advmss); } /* Force reservation of one segment. */ if (sysctl_tcp_app_win && tp->window_clamp > 2 * tp->advmss && tp->window_clamp + tp->advmss > maxwin) tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_time_stamp; } /* 5. Recalculate window clamp after socket hit its memory bounds. */ static void tcp_clamp_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ack.quick = 0; if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && !tcp_memory_pressure && atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), sysctl_tcp_rmem[2]); } if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); } /* Initialize RCV_MSS value. * RCV_MSS is an our guess about MSS used by the peer. * We haven't any direct information about the MSS. * It's better to underestimate the RCV_MSS rather than overestimate. * Overestimations make us ACKing less frequently than needed. * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 
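 * The initial guess below is min(advmss, mss_cache, rcv_wnd / 2),
 * clamped to the [TCP_MIN_MSS, TCP_MSS_DEFAULT] range.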
*/ void tcp_initialize_rcv_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); hint = min(hint, TCP_MSS_DEFAULT); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; } EXPORT_SYMBOL(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * * The algorithm for RTT estimation w/o timestamps is based on * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. * <http://public.lanl.gov/radiant/pubs.html#DRS> * * More detail on this code can be found at * <http://staff.psc.edu/jheffner/>, * though this reference is out of date. A new paper * is pending. */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; if (m == 0) m = 1; if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which * are stalled on filesystem I/O. * * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else if (m < new_sample) new_sample = m << 3; } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; } static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); } /* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */ void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int space; if (tp->rcvq_space.time == 0) goto new_measure; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; space = 2 * (tp->copied_seq - tp->rcvq_space.seq); space = max(tp->rcvq_space.space, space); if (tp->rcvq_space.space != space) { int rcvmem; tp->rcvq_space.space = space; if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int new_clamp = space; /* Receive space grows, normalize in order to * take into account packet headers and sk_buff * structure overhead. */ space /= tp->advmss; if (!space) space = 1; rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; space *= rcvmem; space = min(space, sysctl_tcp_rmem[2]); if (space > sk->sk_rcvbuf) { sk->sk_rcvbuf = space; /* Make the window clamp follow along. */ tp->window_clamp = new_clamp; } } } new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; } /* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. 
The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; TCP_ECN_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt; /* RTT */ /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. I guess in BSD RTO takes ONE value, so that it is absolutely * does not matter how to _calculate_ it. Seems, it was trap * that VJ failed to avoid. 8) */ if (m == 0) m = 1; if (tp->srtt != 0) { m -= (tp->srtt >> 3); /* m is now error in rtt est */ tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */ if (m < 0) { m = -m; /* m is now abs(error) */ m -= (tp->mdev >> 2); /* similar update on mdev */ /* This is similar to one of Eifel findings. * Eifel blocks mdev updates when rtt decreases. * This solution is a bit different: we use finer gain * for mdev in this case (alpha*beta). * Like Eifel it also prevents growth of rto, * but also it limits too fast rto decreases, * happening in pure Eifel. 
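 *
 * Worked example (made-up numbers): if mdev >> 2 is 20 ticks and the
 * new |error| is 60 ticks, mdev grows by (60 - 20) >> 3 = 5 instead
 * of 40, so a single unusually short sample cannot blow up mdev (and
 * therefore RTO) on its own.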
*/ if (m > 0) m >>= 3; } else { m -= (tp->mdev >> 2); /* similar update on mdev */ } tp->mdev += m; /* mdev = 3/4 mdev + 1/4 new */ if (tp->mdev > tp->mdev_max) { tp->mdev_max = tp->mdev; if (tp->mdev_max > tp->rttvar) tp->rttvar = tp->mdev_max; } if (after(tp->snd_una, tp->rtt_seq)) { if (tp->mdev_max < tp->rttvar) tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2; tp->rtt_seq = tp->snd_nxt; tp->mdev_max = tcp_rto_min(sk); } } else { /* no previous measure. */ tp->srtt = m << 3; /* take the measured time to be rtt */ tp->mdev = m << 1; /* make sure rto = 3*rtt */ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); tp->rtt_seq = tp->snd_nxt; } } /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ static inline void tcp_set_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) * * More seriously: * 1. If rtt variance happened to be less 50msec, it is hallucination. * It cannot be less due to utterly erratic ACK generation made * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ * to do with delayed acks, because at cwnd>2 true delack timeout * is invisible. Actually, Linux-2.4 also generates erratic * ACKs in some circumstances. */ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); /* 2. Fixups made earlier cannot be right. * If we do not estimate RTO correctly without them, * all the algo is pure shit and should be replaced * with correct one. It is exactly, which we pretend to do. */ /* NOTE: clamping at TCP_RTO_MIN is not required, current algo * guarantees that rto is higher. */ tcp_bound_rto(sk); } /* Save metrics learned by this TCP session. This function is called only, when TCP finishes successfully i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE. */ void tcp_update_metrics(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (sysctl_tcp_nometrics_save) return; dst_confirm(dst); if (dst && (dst->flags & DST_HOST)) { const struct inet_connection_sock *icsk = inet_csk(sk); int m; unsigned long rtt; if (icsk->icsk_backoff || !tp->srtt) { /* This session failed to estimate rtt. Why? * Probably, no packets returned in time. * Reset our results. */ if (!(dst_metric_locked(dst, RTAX_RTT))) dst_metric_set(dst, RTAX_RTT, 0); return; } rtt = dst_metric_rtt(dst, RTAX_RTT); m = rtt - tp->srtt; /* If newly calculated rtt larger than stored one, * store new one. Otherwise, use EWMA. Remember, * rtt overestimation is always better than underestimation. */ if (!(dst_metric_locked(dst, RTAX_RTT))) { if (m <= 0) set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt); else set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3)); } if (!(dst_metric_locked(dst, RTAX_RTTVAR))) { unsigned long var; if (m < 0) m = -m; /* Scale deviation to rttvar fixed point */ m >>= 1; if (m < tp->mdev) m = tp->mdev; var = dst_metric_rtt(dst, RTAX_RTTVAR); if (m >= var) var = m; else var -= (var - m) >> 2; set_dst_metric_rtt(dst, RTAX_RTTVAR, var); } if (tcp_in_initial_slowstart(tp)) { /* Slow start still did not finish. */ if (dst_metric(dst, RTAX_SSTHRESH) && !dst_metric_locked(dst, RTAX_SSTHRESH) && (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH)) dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1); if (!dst_metric_locked(dst, RTAX_CWND) && tp->snd_cwnd > dst_metric(dst, RTAX_CWND)) dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd); } else if (tp->snd_cwnd > tp->snd_ssthresh && icsk->icsk_ca_state == TCP_CA_Open) { /* Cong. avoidance phase, cwnd is reliable. 
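 * ssthresh is saved as max(cwnd / 2, ssthresh) and the cached cwnd is
 * blended 50/50 with the current one, e.g. a stored CWND metric of 10
 * and snd_cwnd of 20 is written back as (10 + 20) >> 1 = 15.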
*/ if (!dst_metric_locked(dst, RTAX_SSTHRESH)) dst_metric_set(dst, RTAX_SSTHRESH, max(tp->snd_cwnd >> 1, tp->snd_ssthresh)); if (!dst_metric_locked(dst, RTAX_CWND)) dst_metric_set(dst, RTAX_CWND, (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1); } else { /* Else slow start did not finish, cwnd is non-sense, ssthresh may be also invalid. */ if (!dst_metric_locked(dst, RTAX_CWND)) dst_metric_set(dst, RTAX_CWND, (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1); if (dst_metric(dst, RTAX_SSTHRESH) && !dst_metric_locked(dst, RTAX_SSTHRESH) && tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH)) dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh); } if (!dst_metric_locked(dst, RTAX_REORDERING)) { if (dst_metric(dst, RTAX_REORDERING) < tp->reordering && tp->reordering != sysctl_tcp_reordering) dst_metric_set(dst, RTAX_REORDERING, tp->reordering); } } } __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) { __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); if (!cwnd) cwnd = TCP_INIT_CWND; return min_t(__u32, cwnd, tp->snd_cwnd_clamp); } /* Set slow start threshold and cwnd not falling to slow start */ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); tp->prior_ssthresh = 0; tp->bytes_acked = 0; if (icsk->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; if (set_ssthresh) tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1U); tp->snd_cwnd_cnt = 0; tp->high_seq = tp->snd_nxt; tp->snd_cwnd_stamp = tcp_time_stamp; TCP_ECN_queue_cwr(tp); tcp_set_ca_state(sk, TCP_CA_CWR); } } /* * Packet counting of FACK is based on in-order assumptions, therefore TCP * disables it when reordering is detected */ static void tcp_disable_fack(struct tcp_sock *tp) { /* RFC3517 uses different metric in lost marker => reset on change */ if (tcp_is_fack(tp)) tp->lost_skb_hint = NULL; tp->rx_opt.sack_ok &= ~2; } /* Take a notice that peer is sending D-SACKs */ static void tcp_dsack_seen(struct tcp_sock *tp) { tp->rx_opt.sack_ok |= 4; } /* Initialize metrics on socket. */ static void tcp_init_metrics(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (dst == NULL) goto reset; dst_confirm(dst); if (dst_metric_locked(dst, RTAX_CWND)) tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND); if (dst_metric(dst, RTAX_SSTHRESH)) { tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH); if (tp->snd_ssthresh > tp->snd_cwnd_clamp) tp->snd_ssthresh = tp->snd_cwnd_clamp; } else { /* ssthresh may have been reduced unnecessarily during. * 3WHS. Restore it back to its initial default. */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; } if (dst_metric(dst, RTAX_REORDERING) && tp->reordering != dst_metric(dst, RTAX_REORDERING)) { tcp_disable_fack(tp); tp->reordering = dst_metric(dst, RTAX_REORDERING); } if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0) goto reset; /* Initial rtt is determined from SYN,SYN-ACK. * The segment is small and rtt may appear much * less than real one. Use per-dst memory * to make it more realistic. * * A bit of theory. RTT is time passed after "normal" sized packet * is sent until it is ACKed. In normal circumstances sending small * packets force peer to delay ACKs and calculation is correct too. * The algorithm is adaptive and, provided we follow specs, it * NEVER underestimate RTT. BUT! 
If peer tries to make some clever * tricks sort of "quick acks" for time long enough to decrease RTT * to low value, and then abruptly stops to do it and starts to delay * ACKs, wait for troubles. */ if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) { tp->srtt = dst_metric_rtt(dst, RTAX_RTT); tp->rtt_seq = tp->snd_nxt; } if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) { tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR); tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); } tcp_set_rto(sk); reset: if (tp->srtt == 0) { /* RFC2988bis: We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious * retransmission. */ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been * retransmitted. In light of RFC2988bis' more aggressive 1sec * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK * retransmission has occurred. */ if (tp->total_retrans > 1) tp->snd_cwnd = 1; else tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd_stamp = tcp_time_stamp; } static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); if (metric > tp->reordering) { int mib_idx; tp->reordering = min(TCP_MAX_REORDERING, metric); /* This exciting event is worth to be remembered. 8) */ if (ts) mib_idx = LINUX_MIB_TCPTSREORDER; else if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENOREORDER; else if (tcp_is_fack(tp)) mib_idx = LINUX_MIB_TCPFACKREORDER; else mib_idx = LINUX_MIB_TCPSACKREORDER; NET_INC_STATS_BH(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, tp->reordering, tp->fackets_out, tp->sacked_out, tp->undo_marker ? tp->undo_retrans : 0); #endif tcp_disable_fack(tp); } } /* This must be called before lost_out is incremented */ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) { if ((tp->retransmit_skb_hint == NULL) || before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) tp->retransmit_skb_hint = skb; if (!tp->lost_out || after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) { if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tcp_verify_retransmit_hint(tp, skb); tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } } static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) { tcp_verify_retransmit_hint(tp, skb); if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } } /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). * Packets in queue with these bits set are counted in variables * sacked_out, retrans_out and lost_out, correspondingly. * * Valid combinations are: * Tag InFlight Description * 0 1 - orig segment is in flight. * S 0 - nothing flies, orig reached receiver. * L 0 - nothing flies, orig lost by net. * R 2 - both orig and retransmit are in flight. * L|R 1 - orig is lost, retransmit is in flight. * S|R 1 - orig reached receiver, retrans is still in flight. 
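 * (InFlight above is per-packet 1 - S - L + R, matching
 * in_flight = packets_out - (sacked_out + lost_out) + retrans_out.)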
* (L|S|R is logically valid, it could occur when L|R is sacked, * but it is equivalent to plain S and code short-curcuits it to S. * L|S is logically invalid, it would mean -1 packet in flight 8)) * * These 6 states form finite state machine, controlled by the following events: * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) * 3. Loss detection event of one of three flavors: * A. Scoreboard estimator decided the packet is lost. * A'. Reno "three dupacks" marks head of queue lost. * A''. Its FACK modfication, head until snd.fack is lost. * B. SACK arrives sacking data transmitted after never retransmitted * hole was sent out. * C. SACK arrives sacking SND.NXT at the moment, when the * segment was retransmitted. * 4. D-SACK added new rule: D-SACK changes any tag to S. * * It is pleasant to note, that state diagram turns out to be commutative, * so that we are allowed not to be bothered by order of our actions, * when multiple events arrive simultaneously. (see the function below). * * Reordering detection. * -------------------- * Reordering metric is maximal distance, which a packet can be displaced * in packet stream. With SACKs we can estimate it: * * 1. SACK fills old hole and the corresponding segment was not * ever retransmitted -> reordering. Alas, we cannot use it * when segment was retransmitted. * 2. The last flaw is solved with D-SACK. D-SACK arrives * for retransmitted and already SACKed segment -> reordering.. * Both of these heuristics are not used in Loss state, when we cannot * account for retransmits accurately. * * SACK block validation. * ---------------------- * * SACK block range validation checks that the received SACK block fits to * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT. * Note that SND.UNA is not included to the range though being valid because * it means that the receiver is rather inconsistent with itself reporting * SACK reneging when it should advance SND.UNA. Such SACK block this is * perfectly valid, however, in light of RFC2018 which explicitly states * that "SACK block MUST reflect the newest segment. Even if the newest * segment is going to be discarded ...", not that it looks very clever * in case of head skb. Due to potentional receiver driven attacks, we * choose to avoid immediate execution of a walk in write queue due to * reneging and defer head skb's loss recovery to standard loss recovery * procedure that will eventually trigger (nothing forbids us doing this). * * Implements also blockage to start_seq wrap-around. Problem lies in the * fact that though start_seq (s) is before end_seq (i.e., not reversed), * there's no guarantee that it will be before snd_nxt (n). The problem * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt * wrap (s_w): * * <- outs wnd -> <- wrapzone -> * u e n u_w e_w s n_w * | | | | | | | * |<------------+------+----- TCP seqno space --------------+---------->| * ...-- <2^31 ->| |<--------... * ...---- >2^31 ------>| |<--------... * * Current code wouldn't be vulnerable but it's better still to discard such * crazy SACK blocks. Doing this check for start_seq alone closes somewhat * similar case (end_seq after snd_nxt wrap) as earlier reversed check in * snd_nxt wrap -> snd_una region will then become "well defined", i.e., * equal to the ideal case (infinite seqno space without wrap caused issues). 
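 * In tcp_is_sackblock_valid() below this is the
 * !before(start_seq, tp->snd_nxt) test.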
* * With D-SACK the lower bound is extended to cover sequence space below * SND.UNA down to undo_marker, which is the last point of interest. Yet * again, D-SACK block must not to go across snd_una (for the same reason as * for the normal SACK blocks, explained above). But there all simplicity * ends, TCP might receive valid D-SACKs below that. As long as they reside * fully below undo_marker they do not affect behavior in anyway and can * therefore be safely ignored. In rare cases (which are more or less * theoretical ones), the D-SACK will nicely cross that boundary due to skb * fragmentation and packet reordering past skb's retransmission. To consider * them correctly, the acceptable range must be extended even more though * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. */ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) return 0; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) return 0; /* In outstanding window? ...This is valid exit for D-SACKs too. * start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) return 1; if (!is_dsack || !tp->undo_marker) return 0; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) return 0; if (!before(start_seq, tp->undo_marker)) return 1; /* Too old */ if (!after(end_seq, tp->undo_marker)) return 0; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. */ return !before(start_seq, end_seq - tp->max_window); } /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". * Event "C". Later note: FACK people cheated me again 8), we have to account * for reordering! Ugly, but should help. * * Search retransmitted skbs from write_queue that were sent when snd_nxt was * less than what is now known to be received by the other end (derived from * highest SACK block). Also calculate the lowest snd_nxt among the remaining * retransmitted skbs to avoid some costly processing per ACKs. */ static void tcp_mark_lost_retrans(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt = 0; u32 new_low_seq = tp->snd_nxt; u32 received_upto = tcp_highest_sack_seq(tp); if (!tcp_is_fack(tp) || !tp->retrans_out || !after(received_upto, tp->lost_retrans_low) || icsk->icsk_ca_state != TCP_CA_Recovery) return; tcp_for_write_queue(skb, sk) { u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; if (skb == tcp_send_head(sk)) break; if (cnt == tp->retrans_out) break; if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) continue; if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) continue; /* TODO: We would like to get rid of tcp_is_fack(tp) only * constraint here (see above) but figuring out that at * least tp->reordering SACK blocks reside between ack_seq * and received_upto is not easy task to do cheaply with * the available datastructures. * * Whether FACK should check here for tp->reordering segs * in-between one could argue for either way (it would be * rather simple to implement as we could count fack_count * during the walk and do tp->fackets_out - fack_count). 
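 *
 * ack_seq here holds the snd_nxt value recorded when the skb was
 * retransmitted (see the function comment above); data SACKed beyond
 * it (received_upto) implies the retransmission itself was lost.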
*/ if (after(received_upto, ack_seq)) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); tcp_skb_mark_lost_uncond_verify(tp, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); } else { if (before(ack_seq, new_low_seq)) new_low_seq = ack_seq; cnt += tcp_skb_pcount(skb); } } if (tp->retrans_out) tp->lost_retrans_low = new_low_seq; } static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); int dup_sack = 0; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = 1; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { dup_sack = 1; tcp_dsack_seen(tp); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } /* D-SACK for already forgotten data... Do dumb counting. */ if (dup_sack && tp->undo_marker && tp->undo_retrans && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; return dup_sack; } struct tcp_sacktag_state { int reord; int fack_count; int flag; }; /* Check if skb is fully within the SACK block. In presence of GSO skbs, * the incoming SACK may not exactly match but we can find smaller MSS * aligned portion of it that matches. Therefore we might need to fragment * which may fail and creates some hassle (caller must handle error case * returns). * * FIXME: this could be merged to shift decision code */ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int in_sack, err; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len > skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss); if (err < 0) return err; } return in_sack; } static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, int dup_sack, int pcount) { struct tcp_sock *tp = tcp_sk(sk); u8 sacked = TCP_SKB_CB(skb)->sacked; int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans && after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). 
*/ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing * that retransmission is still in flight. */ if (sacked & TCPCB_LOST) { sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); tp->lost_out -= pcount; tp->retrans_out -= pcount; } } else { if (!(sacked & TCPCB_RETRANS)) { /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ if (before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); /* SACK enhanced F-RTO (RFC4138; Appendix B) */ if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) state->flag |= FLAG_ONLY_ORIG_SACKED; } if (sacked & TCPCB_LOST) { sacked &= ~TCPCB_LOST; tp->lost_out -= pcount; } } sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; fack_count += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; } /* D-SACK. We can detect redundant retransmission in S|R and plain R * frames and clear it. undo_retrans is decreased above, L|R frames * are accounted above as well. */ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= pcount; } return sacked; } static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, int dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); BUG_ON(!pcount); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; skb_shinfo(prev)->gso_segs += pcount; BUG_ON(skb_shinfo(skb)->gso_segs < pcount); skb_shinfo(skb)->gso_segs -= pcount; /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep * setting gso_size to something. */ if (!skb_shinfo(prev)->gso_size) { skb_shinfo(prev)->gso_size = mss; skb_shinfo(prev)->gso_type = sk->sk_gso_type; } /* CHECKME: To clear or not to clear? Mimics normal skb currently */ if (skb_shinfo(skb)->gso_segs <= 1) { skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; } /* We discard results */ tcp_sacktag_one(skb, sk, state, dup_sack, pcount); /* Difference in this won't matter, both ACKed by the same cumul. 
ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); return 0; } /* Whole SKB was eaten :-) */ if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; if (skb == tp->scoreboard_skb_hint) tp->scoreboard_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); } TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); return 1; } /* I wish gso_size would have a bit more sane initialization than * something-or-zero which complicates things */ static int tcp_skb_seglen(const struct sk_buff *skb) { return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); } /* Shifting pages past head area doesn't work */ static int skb_can_shift(const struct sk_buff *skb) { return !skb_headlen(skb) && skb_is_nonlinear(skb); } /* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. */ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, int dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; int mss; int pcount = 0; int len; int in_sack; if (!sk_can_gso(sk)) goto fallback; /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) goto fallback; if (!skb_can_shift(skb)) goto fallback; /* This frame is about to be dropped (was ACKed). */ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) goto fallback; /* Can only happen with delayed DSACK + discard craziness */ if (unlikely(skb == tcp_write_queue_head(sk))) goto fallback; prev = tcp_write_queue_prev(sk, skb); if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (in_sack) { len = skb->len; pcount = tcp_skb_pcount(skb); mss = tcp_skb_seglen(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; } else { if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) goto noop; /* CHECKME: This is non-MSS split case only?, this will * cause skipped skbs due to advancing loop btw, original * has that feature too */ if (tcp_skb_pcount(skb) <= 1) goto noop; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { /* TODO: head merge to next could be attempted here * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), * though it might not be worth of the additional hassle * * ...we can probably just fallback to what was done * previously. We could try merging non-SACKed ones * as well but it probably isn't going to buy off * because later SACKs might again split them, and * it would make skb timestamp tracking considerably * harder problem. */ goto fallback; } len = end_seq - TCP_SKB_CB(skb)->seq; BUG_ON(len < 0); BUG_ON(len > skb->len); /* MSS boundaries should be honoured or else pcount will * severely break even though it makes things bit trickier. 
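 * E.g. with len 3500 and mss 1000, only pcount = 3 full segments are
 * shifted and len is rounded down to 3000; the odd 500 bytes stay put.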
* Optimize common case to avoid most of the divides */ mss = tcp_skb_mss(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; if (len == mss) { pcount = 1; } else if (len < mss) { goto noop; } else { pcount = len / mss; len = pcount * mss; } } if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very * useful when hole on every nth skb pattern happens */ if (prev == tcp_write_queue_tail(sk)) goto out; skb = tcp_write_queue_next(sk, prev); if (!skb_can_shift(skb) || (skb == tcp_send_head(sk)) || ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || (mss != tcp_skb_seglen(skb))) goto out; len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: state->fack_count += pcount; return prev; noop: return skb; fallback: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; } static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, int dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; int dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; /* queue is in-order => we can short-circuit the walk early */ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; if ((next_dup != NULL) && before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in_sack = tcp_match_skb_to_sack(sk, skb, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) dup_sack = 1; } /* skb reference here is a bit tricky to get right, since * shifting can eat and free both this skb and the next, * so not even _safe variant of the loop is enough. 
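 * tcp_shift_skb_data() returns prev when data was merged into the
 * previous skb, the original skb when it was a no-op, or NULL to make
 * us fall back to plain tcp_match_skb_to_sack().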
*/ if (in_sack <= 0) { tmp = tcp_shift_skb_data(sk, skb, state, start_seq, end_seq, dup_sack); if (tmp != NULL) { if (tmp != skb) { skb = tmp; continue; } in_sack = 0; } else { in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); } } if (unlikely(in_sack < 0)) break; if (in_sack) { TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, state, dup_sack, tcp_skb_pcount(skb)); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) tcp_advance_highest_sack(sk, skb); } state->fack_count += tcp_skb_pcount(skb); } return skb; } /* Avoid all extra work that is being done by sacktag while walking in * a normal way */ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, u32 skip_to_seq) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; state->fack_count += tcp_skb_pcount(skb); } return skb; } static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) { if (next_dup == NULL) return skb; if (before(next_dup->start_seq, skip_to_seq)) { skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); skb = tcp_sacktag_walk(skb, sk, NULL, state, next_dup->start_seq, next_dup->end_seq, 1); } return skb; } static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) { return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); struct tcp_sack_block sp[TCP_NUM_SACKS]; struct tcp_sack_block *cache; struct tcp_sacktag_state state; struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; int found_dup_sack = 0; int i, j; int first_sack_index; state.flag = 0; state.reord = tp->packets_out; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; tcp_highest_sack_reset(sk); } found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, num_sacks, prior_snd_una); if (found_dup_sack) state.flag |= FLAG_DSACKING_ACK; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can * contain valid SACK info. 
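 *
 * "Too old" means the ACK field lags prior_snd_una by more than
 * tp->max_window; anything newer is still scanned for SACK blocks.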
*/ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; if (!tp->packets_out) goto out; used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { int dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { int mib_idx; if (dup_sack) { if (!tp->undo_marker) mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; else mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; mib_idx = LINUX_MIB_TCPSACKDISCARD; } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; } /* Ignore very old stuff early */ if (!after(sp[used_sacks].end_seq, prior_snd_una)) continue; used_sacks++; } /* order SACK blocks to allow in order walk of the retrans queue */ for (i = used_sacks - 1; i > 0; i--) { for (j = 0; j < i; j++) { if (after(sp[j].start_seq, sp[j + 1].start_seq)) { swap(sp[j], sp[j + 1]); /* Track where the first SACK block goes to */ if (j == first_sack_index) first_sack_index = j + 1; } } } skb = tcp_write_queue_head(sk); state.fack_count = 0; i = 0; if (!tp->sacked_out) { /* It's already past, so skip checking against it */ cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } else { cache = tp->recv_sack_cache; /* Skip empty blocks in at head of the cache */ while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && !cache->end_seq) cache++; } while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; int dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; /* Event "B" in the comment above. */ if (after(end_seq, tp->high_seq)) state.flag |= FLAG_DATA_LOST; /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) cache++; /* Can skip some work by looking recv_sack_cache? */ if (tcp_sack_cache_ok(tp, cache) && !dup_sack && after(end_seq, cache->start_seq)) { /* Head todo? */ if (before(start_seq, cache->start_seq)) { skb = tcp_sacktag_skip(skb, sk, &state, start_seq); skb = tcp_sacktag_walk(skb, sk, next_dup, &state, start_seq, cache->start_seq, dup_sack); } /* Rest of the block already fully processed? */ if (!after(end_seq, cache->end_seq)) goto advance_sp; skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, &state, cache->end_seq); /* ...tail remains todo... */ if (tcp_highest_sack_seq(tp) == cache->end_seq) { /* ...but better entrypoint exists! 
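 * (tcp_highest_sack() is the highest SACKed skb; resuming there with
 * fack_count preset to fackets_out avoids rescanning skbs that are
 * already tagged.)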
*/ skb = tcp_highest_sack(sk); if (skb == NULL) break; state.fack_count = tp->fackets_out; cache++; goto walk; } skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); /* Check overlap against next cached too (past this one already) */ cache++; continue; } if (!before(start_seq, tcp_highest_sack_seq(tp))) { skb = tcp_highest_sack(sk); if (skb == NULL) break; state.fack_count = tp->fackets_out; } skb = tcp_sacktag_skip(skb, sk, &state, start_seq); walk: skb = tcp_sacktag_walk(skb, sk, next_dup, &state, start_seq, end_seq, dup_sack); advance_sp: /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct * due to in-order walk */ if (after(end_seq, tp->frto_highmark)) state.flag &= ~FLAG_ONLY_ORIG_SACKED; i++; } /* Clear the head of the cache sack blocks so we can skip it next time */ for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { tp->recv_sack_cache[i].start_seq = 0; tp->recv_sack_cache[i].end_seq = 0; } for (j = 0; j < used_sacks; j++) tp->recv_sack_cache[i++] = sp[j]; tcp_mark_lost_retrans(sk); tcp_verify_left_out(tp); if ((state.reord < tp->fackets_out) && ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); out: #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); WARN_ON((int)tp->lost_out < 0); WARN_ON((int)tp->retrans_out < 0); WARN_ON((int)tcp_packets_in_flight(tp) < 0); #endif return state.flag; } /* Limits sacked_out so that sum with lost_out isn't ever larger than * packets_out. Returns zero if sacked_out adjustement wasn't necessary. */ static int tcp_limit_reno_sacked(struct tcp_sock *tp) { u32 holes; holes = max(tp->lost_out, 1U); holes = min(holes, tp->packets_out); if ((tp->sacked_out + holes) > tp->packets_out) { tp->sacked_out = tp->packets_out - holes; return 1; } return 0; } /* If we receive more dupacks than we expected counting segments * in assumption of absent reordering, interpret this as reordering. * The only another reason could be bug in receiver TCP. */ static void tcp_check_reno_reordering(struct sock *sk, const int addend) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_limit_reno_sacked(tp)) tcp_update_reordering(sk, tp->packets_out + addend, 0); } /* Emulate SACKs for SACKless connection: account for a new dupack. */ static void tcp_add_reno_sack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->sacked_out++; tcp_check_reno_reordering(sk, 0); tcp_verify_left_out(tp); } /* Account for ACK, ACKing some data in Reno Recovery phase. */ static void tcp_remove_reno_sacks(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. 
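 * E.g. with sacked_out == 3 emulated dupacks, an ACK that newly acks
 * two segments uses one to fill the hole and one to consume a dupack,
 * leaving sacked_out == 2.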
*/ if (acked - 1 >= tp->sacked_out) tp->sacked_out = 0; else tp->sacked_out -= acked - 1; } tcp_check_reno_reordering(sk, acked); tcp_verify_left_out(tp); } static inline void tcp_reset_reno_sack(struct tcp_sock *tp) { tp->sacked_out = 0; } static int tcp_is_sackfrto(const struct tcp_sock *tp) { return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); } /* F-RTO can only be used if TCP has never retransmitted anything other than * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) */ int tcp_use_frto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; if (!sysctl_tcp_frto) return 0; /* MTU probe and F-RTO won't really play nicely along currently */ if (icsk->icsk_mtup.probe_size) return 0; if (tcp_is_sackfrto(tp)) return 1; /* Avoid expensive walking of rexmit queue if possible */ if (tp->retrans_out > 1) return 0; skb = tcp_write_queue_head(sk); if (tcp_skb_is_last(sk, skb)) return 1; skb = tcp_write_queue_next(sk, skb); /* Skips head */ tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) return 0; /* Short-circuit when first non-SACKed skb has been checked */ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) break; } return 1; } /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO * recovery a bit and use heuristics in tcp_process_frto() to detect if * the RTO was spurious. Only clear SACKED_RETRANS of the head here to * keep retrans_out counting accurate (with SACK F-RTO, other than head * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS * bits are handled if the Loss state is really to be entered (in * tcp_enter_frto_loss). * * Do like tcp_enter_loss() would; when RTO expires the second time it * does: * "Reduce ssthresh if it has not yet been made inside this window." */ void tcp_enter_frto(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || tp->snd_una == tp->high_seq || ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); /* Our state is too optimistic in ssthresh() call because cwnd * is not reduced until tcp_enter_frto_loss() when previous F-RTO * recovery has not yet completed. Pattern would be this: RTO, * Cumulative ACK, RTO (2xRTO for the same segment does not end * up here twice). * RFC4138 should be more specific on what to do, even though * RTO is quite unlikely to occur after the first Cumulative ACK * due to back-off and complexity of triggering events ... */ if (tp->frto_counter) { u32 stored_cwnd; stored_cwnd = tp->snd_cwnd; tp->snd_cwnd = 2; tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tp->snd_cwnd = stored_cwnd; } else { tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); } /* ... in theory, cong.control module could do "any tricks" in * ssthresh(), which means that ca_state, lost bits and lost_out * counter would have to be faked before the call occurs. 
We * consider that too expensive, unlikely and hacky, so modules * using these in ssthresh() must deal these incompatibility * issues if they receives CA_EVENT_FRTO and frto_counter != 0 */ tcp_ca_event(sk, CA_EVENT_FRTO); } tp->undo_marker = tp->snd_una; tp->undo_retrans = 0; skb = tcp_write_queue_head(sk); if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) tp->undo_marker = 0; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_verify_left_out(tp); /* Too bad if TCP was application limited */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); /* Earlier loss recovery underway (see RFC4138; Appendix B). * The last condition is necessary at least in tp->frto_counter case. */ if (tcp_is_sackfrto(tp) && (tp->frto_counter || ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && after(tp->high_seq, tp->snd_una)) { tp->frto_highmark = tp->high_seq; } else { tp->frto_highmark = tp->snd_nxt; } tcp_set_ca_state(sk, TCP_CA_Disorder); tp->high_seq = tp->snd_nxt; tp->frto_counter = 1; } /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, * which indicates that we should follow the traditional RTO recovery, * i.e. mark everything lost and do go-back-N retransmission. */ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; tp->lost_out = 0; tp->retrans_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; /* * Count the retransmission made on RTO correctly (only when * waiting for the first ACK and did not get it)... */ if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { /* For some reason this R-bit might get cleared? */ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) tp->retrans_out += tcp_skb_pcount(skb); /* ...enter this if branch just for the first segment */ flag |= FLAG_DATA_ACKED; } else { if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) tp->undo_marker = 0; TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; } /* Marking forward transmissions that were made after RTO lost * can cause unnecessary retransmissions in some scenarios, * SACK blocks will mitigate that in some but not in all cases. * We used to not mark them but it was causing break-ups with * receivers that do only in-order receival. * * TODO: we could detect presence of such receiver and select * different behavior per flow. */ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->frto_counter = 0; tp->bytes_acked = 0; tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); tcp_clear_all_retrans_hints(tp); } static void tcp_clear_retrans_partial(struct tcp_sock *tp) { tp->retrans_out = 0; tp->lost_out = 0; tp->undo_marker = 0; tp->undo_retrans = 0; } void tcp_clear_retrans(struct tcp_sock *tp) { tcp_clear_retrans_partial(tp); tp->fackets_out = 0; tp->sacked_out = 0; } /* Enter Loss state. If "how" is not zero, forget all SACK information * and reset tags completely, otherwise preserve SACKs. 
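 * (The SACK reneging path, tcp_check_sack_reneging(), calls this with
 * how = 1.)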
If receiver * dropped its ofo queue, we will know this due to reneging detection. */ void tcp_enter_loss(struct sock *sk, int how) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->bytes_acked = 0; tcp_clear_retrans_partial(tp); if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (!how) { /* Push undo marker, if it was plain RTO and nothing * was retransmitted. */ tp->undo_marker = tp->snd_una; } else { tp->sacked_out = 0; tp->fackets_out = 0; } tcp_clear_all_retrans_hints(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) tp->undo_marker = 0; TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); /* Abort F-RTO algorithm if one is in progress */ tp->frto_counter = 0; } /* If ACK arrived pointing to a remembered SACK, it means that our * remembered SACKs do not reflect real state of receiver i.e. * receiver _host_ is heavily congested (or buggy). * * Do processing similar to RTO timeout. */ static int tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct inet_connection_sock *icsk = inet_csk(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tcp_enter_loss(sk, 1); icsk->icsk_retransmits++; tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); return 1; } return 0; } static inline int tcp_fackets_out(const struct tcp_sock *tp) { return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; } /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs * counter when SACK is enabled (without SACK, sacked_out is used for * that purpose). * * Instead, with FACK TCP uses fackets_out that includes both SACKed * segments up to the highest received SACK block so far and holes in * between them. * * With reordering, holes may still be in flight, so RFC3517 recovery * uses pure sacked_out (total number of SACKed segments) even though * it violates the RFC that uses duplicate ACKs, often these are equal * but when e.g. out-of-window ACKs or packet duplication occurs, * they differ. Since neither occurs due to loss, TCP should really * ignore them. */ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) { return tcp_is_fack(tp) ? 
tp->fackets_out : tp->sacked_out + 1; } static inline int tcp_skb_timedout(const struct sock *sk, const struct sk_buff *skb) { return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; } static inline int tcp_head_timedout(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); return tp->packets_out && tcp_skb_timedout(sk, tcp_write_queue_head(sk)); } /* Linux NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * * "Open" Normal state, no dubious events, fast path. * "Disorder" In all the respects it is "Open", * but requires a bit more attention. It is entered when * we see some SACKs or dupacks. It is split of "Open" * mainly to move some processing from fast path to slow one. * "CWR" CWND was reduced due to some Congestion Notification event. * It can be ECN, ICMP source quench, local device congestion. * "Recovery" CWND was reduced, we are fast-retransmitting. * "Loss" CWND was reduced due to RTO timeout or SACK reneging. * * tcp_fastretrans_alert() is entered: * - each incoming ACK, if state is not "Open" * - when arrived ACK is unusual, namely: * * SACK * * Duplicate ACK. * * ECN ECE. * * Counting packets in flight is pretty simple. * * in_flight = packets_out - left_out + retrans_out * * packets_out is SND.NXT-SND.UNA counted in packets. * * retrans_out is number of retransmitted segments. * * left_out is number of segments left network, but not ACKed yet. * * left_out = sacked_out + lost_out * * sacked_out: Packets, which arrived to receiver out of order * and hence not ACKed. With SACKs this number is simply * amount of SACKed data. Even without SACKs * it is easy to give pretty reliable estimate of this number, * counting duplicate ACKs. * * lost_out: Packets lost by network. TCP has no explicit * "loss notification" feedback from network (for now). * It means that this number can be only _guessed_. * Actually, it is the heuristics to predict lossage that * distinguishes different algorithms. * * F.e. after RTO, when all the queue is considered as lost, * lost_out = packets_out and in_flight = retrans_out. * * Essentially, we have now two algorithms counting * lost packets. * * FACK: It is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. * It is absolutely correct estimate, if network does not reorder * packets. And it loses any connection to reality when reordering * takes place. We use FACK by default until reordering * is suspected on the path to this destination. * * NewReno: when Recovery is entered, we assume that one segment * is lost (classic Reno). While we are in Recovery and * a partial ACK arrives, we assume that one more packet * is lost (NewReno). This heuristics are the same in NewReno * and SACK. * * Imagine, that's all! Forget about all this shamanism about CWND inflation * deflation etc. CWND is real congestion window, never inflated, changes * only according to classic VJ rules. * * Really tricky (and requiring careful tuning) part of algorithm * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). * The first determines the moment _when_ we should reduce CWND and, * hence, slow down forward transmission. In fact, it determines the moment * when we decide that hole is caused by loss, rather than by a reorder. 
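 *
 * A small worked example of the accounting above (illustrative numbers
 * only): packets_out = 10, sacked_out = 3, lost_out = 2, retrans_out = 2
 * gives left_out = 3 + 2 = 5 and in_flight = 10 - 5 + 2 = 7.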
* * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill * holes, caused by lost packets. * * And the most logically complicated part of algorithm is undo * heuristics. We detect false retransmits due to both too early * fast retransmit (reordering) and underestimated RTO, analyzing * timestamps and D-SACKs. When we detect that some segments were * retransmitted by mistake and CWND reduction was wrong, we undo * window reduction and abort recovery phase. This logic is hidden * inside several functions named tcp_try_undo_<something>. */ /* This function decides, when we should leave Disordered state * and enter Recovery phase, reducing congestion window. * * Main question: may we further continue forward transmission * with the same cwnd? */ static int tcp_time_to_recover(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; /* Do not perform any recovery during F-RTO algorithm */ if (tp->frto_counter) return 0; /* Trick#1: The loss is proven. */ if (tp->lost_out) return 1; /* Not-A-Trick#2 : Classic rule... */ if (tcp_dupack_heuristics(tp) > tp->reordering) return 1; /* Trick#3 : when we use RFC2988 timer restart, fast * retransmit can be triggered by timeout of queue head. */ if (tcp_is_fack(tp) && tcp_head_timedout(sk)) return 1; /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? */ packets_out = tp->packets_out; if (packets_out <= tp->reordering && tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && !tcp_may_send_now(sk)) { /* We have nothing to send. This connection is limited * either by receiver window or by application. */ return 1; } /* If a thin stream is detected, retransmit after first * received dupack. Employ only if SACK is supported in order * to avoid possible corner-case series of spurious retransmissions * Use only if there are no unsent data. */ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && tcp_is_sack(tp) && !tcp_send_head(sk)) return 1; return 0; } /* New heuristics: it is possible only after we switched to restart timer * each time when something is ACKed. Hence, we can detect timed out packets * during fast retransmit without falling to slow start. * * Usefulness of this as is very questionable, since we should know which of * the segments is the next to timeout which is relatively expensive to find * in general case unless we add some data structure just for that. The * current approach certainly won't find the right one too often and when it * finally does find _something_ it usually marks large part of the window * right away (because a retransmission with a larger timestamp blocks the * loop from advancing). -ij */ static void tcp_timeout_skbs(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (!tcp_is_fack(tp) || !tcp_head_timedout(sk)) return; skb = tp->scoreboard_skb_hint; if (tp->scoreboard_skb_hint == NULL) skb = tcp_write_queue_head(sk); tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (!tcp_skb_timedout(sk, skb)) break; tcp_skb_mark_lost(tp, skb); } tp->scoreboard_skb_hint = skb; tcp_verify_left_out(tp); } /* Mark head of queue up as lost. 
With RFC3517 SACK, the packet count is checked * against sacked "cnt", otherwise it's against facked "cnt" */ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt, oldcnt; int err; unsigned int mss; WARN_ON(packets > tp->packets_out); if (tp->lost_skb_hint) { skb = tp->lost_skb_hint; cnt = tp->lost_cnt_hint; /* Head already handled? */ if (mark_head && skb != tcp_write_queue_head(sk)) return; } else { skb = tcp_write_queue_head(sk); cnt = 0; } tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; /* TODO: do this better */ /* this is not the most efficient way to do this... */ tp->lost_skb_hint = skb; tp->lost_cnt_hint = cnt; if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) break; oldcnt = cnt; if (tcp_is_fack(tp) || tcp_is_reno(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) cnt += tcp_skb_pcount(skb); if (cnt > packets) { if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || (oldcnt >= packets)) break; mss = skb_shinfo(skb)->gso_size; err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss); if (err < 0) break; cnt = packets; } tcp_skb_mark_lost(tp, skb); if (mark_head) break; } tcp_verify_left_out(tp); } /* Account newly detected lost packet(s) */ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_reno(tp)) { tcp_mark_head_lost(sk, 1, 1); } else if (tcp_is_fack(tp)) { int lost = tp->fackets_out - tp->reordering; if (lost <= 0) lost = 1; tcp_mark_head_lost(sk, lost, 0); } else { int sacked_upto = tp->sacked_out - tp->reordering; if (sacked_upto >= 0) tcp_mark_head_lost(sk, sacked_upto, 0); else if (fast_rexmit) tcp_mark_head_lost(sk, 1, 1); } tcp_timeout_skbs(sk); } /* CWND moderation, preventing bursts due to too big ACKs * in dubious situations. */ static inline void tcp_moderate_cwnd(struct tcp_sock *tp) { tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + tcp_max_burst(tp)); tp->snd_cwnd_stamp = tcp_time_stamp; } /* Lower bound on congestion window is slow start threshold * unless congestion avoidance choice decides to override it. */ static inline u32 tcp_cwnd_min(const struct sock *sk) { const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; } /* Decrease cwnd each second ack. */ static void tcp_cwnd_down(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); int decr = tp->snd_cwnd_cnt + 1; if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { tp->snd_cwnd_cnt = decr & 1; decr >>= 1; if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) tp->snd_cwnd -= decr; tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); tp->snd_cwnd_stamp = tcp_time_stamp; } } /* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */ static inline int tcp_packet_delayed(const struct tcp_sock *tp) { return !tp->retrans_stamp || (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); } /* Undo procedures.
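 * The tcp_try_undo_*() helpers below rely on the undo_marker/undo_retrans
 * bookkeeping: tcp_may_undo() permits an undo only when undo_marker is set
 * and either nothing in the window was retransmitted (undo_retrans == 0) or
 * the echoed timestamp shows the ACK is for the original transmission
 * (tcp_packet_delayed()).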
*/ #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &np->daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #endif } #else #define DBGUNDO(x...) do { } while (0) #endif static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->undo_cwnd) tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; TCP_ECN_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } tp->snd_cwnd_stamp = tcp_time_stamp; } static inline int tcp_may_undo(const struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } /* People celebrate: "We love our President!" */ static int tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwr(sk, true); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->undo_marker = 0; } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); return 1; } tcp_set_ca_state(sk, TCP_CA_Open); return 0; } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ static void tcp_try_undo_dsack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwr(sk, true); tp->undo_marker = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); } } /* We can clear retrans_stamp when there are no retransmissions in the * window. It would seem that it is trivially available for us in * tp->retrans_out, however, that kind of assumptions doesn't consider * what will happen if errors occur when sending retransmission for the * second time. ...It could the that such segment has only * TCPCB_EVER_RETRANS set at the present time. It seems that checking * the head skb is enough except for some reneging corner cases that * are not worth the effort. * * Main reason for all this complexity is the fact that connection dying * time now depends on the validity of the retrans_stamp, in particular, * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. 
*/ static int tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) return 1; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) return 1; return 0; } /* Undo during fast recovery after partial ACK. */ static int tcp_try_undo_partial(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); /* Partial ACK arrived. Force Hoe's retransmit. */ int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); if (tcp_may_undo(tp)) { /* Plain luck! Hole if filled with delayed * packet, rather than with a retransmit. */ if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); DBGUNDO(sk, "Hoe"); tcp_undo_cwr(sk, false); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); /* So... Do not make Hoe's retransmit yet. * If the first packet was delayed, the rest * ones are most probably delayed as well. */ failed = 0; } return failed; } /* Undo during loss recovery after partial ACK. */ static int tcp_try_undo_loss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } tcp_clear_all_retrans_hints(tp); DBGUNDO(sk, "partial loss"); tp->lost_out = 0; tcp_undo_cwr(sk, true); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); inet_csk(sk)->icsk_retransmits = 0; tp->undo_marker = 0; if (tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return 1; } return 0; } static inline void tcp_complete_cwr(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Do not moderate cwnd if it's already undone in cwr or recovery. */ if (tp->undo_marker) { if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); else /* PRR */ tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { tcp_set_ca_state(sk, state); tp->high_seq = tp->snd_nxt; } } static void tcp_try_to_open(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); if (!tp->frto_counter && !tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) tcp_enter_cwr(sk, 1); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) tcp_moderate_cwnd(tp); } else { tcp_cwnd_down(sk, flag); } } static void tcp_mtup_probe_failed(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; } static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache) / icsk->icsk_mtup.probe_size; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } /* Do a simple retransmit without using the backoff mechanisms in * 
tcp_timer. This is used for path mtu discovery. * The socket is already locked here. */ void tcp_simple_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int mss = tcp_current_mss(sk); u32 prior_lost = tp->lost_out; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (tcp_skb_seglen(skb) > mss && !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_skb_mark_lost_uncond_verify(tp, skb); } } tcp_clear_retrans_hints_partial(tp); if (prior_lost == tp->lost_out) return; if (tcp_is_reno(tp)) tcp_limit_reno_sacked(tp); tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ * in network, but units changed and effective * cwnd/ssthresh really reduced now. */ if (icsk->icsk_ca_state != TCP_CA_Loss) { tp->high_seq = tp->snd_nxt; tp->snd_ssthresh = tcp_current_ssthresh(sk); tp->prior_ssthresh = 0; tp->undo_marker = 0; tcp_set_ca_state(sk, TCP_CA_Loss); } tcp_xmit_retransmit_queue(sk); } EXPORT_SYMBOL(tcp_simple_retransmit); /* This function implements the PRR algorithm, specifcally the PRR-SSRB * (proportional rate reduction with slow start reduction bound) as described in * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt. * It computes the number of packets to send (sndcnt) based on packets newly * delivered: * 1) If the packets in flight is larger than ssthresh, PRR spreads the * cwnd reductions across a full RTT. * 2) If packets in flight is lower than ssthresh (such as due to excess * losses and/or application stalls), do not perform any further cwnd * reductions, but instead slow start up to ssthresh. */ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, int fast_rexmit, int flag) { struct tcp_sock *tp = tcp_sk(sk); int sndcnt = 0; int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + tp->prior_cwnd - 1; sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; } else { sndcnt = min_t(int, delta, max_t(int, tp->prr_delivered - tp->prr_out, newly_acked_sacked) + 1); } sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; } /* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and * packets lost by network. * * Besides that it does CWND reduction, when packet loss is detected * and changes state of machine. * * It does _not_ decide what to send, it is made in function * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int newly_acked_sacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); int fast_rexmit = 0, mib_idx; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; if (WARN_ON(!tp->sacked_out && tp->fackets_out)) tp->fackets_out = 0; /* Now state machine starts. * A. ECE, hence prohibit cwnd undoing, the reduction is required. 
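 * The remaining steps (B. through F.) are annotated inline below.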
*/ if (flag & FLAG_ECE) tp->prior_ssthresh = 0; /* B. In all the states check for reneging SACKs. */ if (tcp_check_sack_reneging(sk, flag)) return; /* C. Process data loss notification, provided it is valid. */ if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && before(tp->snd_una, tp->high_seq) && icsk->icsk_ca_state != TCP_CA_Open && tp->fackets_out > tp->reordering) { tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); } /* D. Check consistency of the current state. */ tcp_verify_left_out(tp); /* E. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { case TCP_CA_Loss: icsk->icsk_retransmits = 0; if (tcp_try_undo_recovery(sk)) return; break; case TCP_CA_CWR: /* CWR is to be held until something *above* high_seq * is ACKed, for the CWR bit to reach the receiver. */ if (tp->snd_una != tp->high_seq) { tcp_complete_cwr(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; tcp_complete_cwr(sk); break; } } /* F. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); break; case TCP_CA_Loss: if (flag & FLAG_DATA_ACKED) icsk->icsk_retransmits = 0; if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (!tcp_try_undo_loss(sk)) { tcp_moderate_cwnd(tp); tcp_xmit_retransmit_queue(sk); return; } if (icsk->icsk_ca_state != TCP_CA_Open) return; /* Loss is undone; fall through to processing in Open state.
*/ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) tcp_add_reno_sack(sk); } if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk)) { tcp_try_to_open(sk, flag); return; } /* MTU probe failure: don't reduce cwnd */ if (icsk->icsk_ca_state < TCP_CA_CWR && icsk->icsk_mtup.probe_size && tp->snd_una == tp->mtu_probe.probe_seq_start) { tcp_mtup_probe_failed(sk); /* Restores the reduction we did in tcp_mtup_probe() */ tp->snd_cwnd++; tcp_simple_retransmit(sk); return; } /* Otherwise enter Recovery state */ if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENORECOVERY; else mib_idx = LINUX_MIB_TCPSACKRECOVERY; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->high_seq = tp->snd_nxt; tp->prior_ssthresh = 0; tp->undo_marker = tp->snd_una; tp->undo_retrans = tp->retrans_out; if (icsk->icsk_ca_state < TCP_CA_CWR) { if (!(flag & FLAG_ECE)) tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); TCP_ECN_queue_cwr(tp); } tp->bytes_acked = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; tp->prr_out = 0; tcp_set_ca_state(sk, TCP_CA_Recovery); fast_rexmit = 1; } if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) tcp_update_scoreboard(sk, fast_rexmit); tp->prr_delivered += newly_acked_sacked; tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); tcp_xmit_retransmit_queue(sk); } void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) { tcp_rtt_estimator(sk, seq_rtt); tcp_set_rto(sk); inet_csk(sk)->icsk_backoff = 0; } EXPORT_SYMBOL(tcp_valid_rtt_meas); /* Read draft-ietf-tcplw-high-performance before mucking * with this code. (Supersedes RFC1323) */ static void tcp_ack_saw_tstamp(struct sock *sk, int flag) { /* RTTM Rule: A TSecr value received in a segment is used to * update the averaged RTT measurement only if the segment * acknowledges some new data, i.e., only if it advances the * left edge of the send window. * * See draft-ietf-tcplw-high-performance-00, section 3.3. * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> * * Changed: reset backoff as soon as we see the first valid sample. * If we do not, we get strongly overestimated rto. With timestamps * samples are accepted even from very old segments: f.e., when rtt=1 * increases to 8, we retransmit 5 times and after 8 seconds delayed * answer arrives rto becomes 120 seconds! If at least one of segments * in window is lost... Voila. --ANK (010210) */ struct tcp_sock *tp = tcp_sk(sk); tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); } static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) { /* We don't have a timestamp. Can only use * packets that are not retransmitted to determine * rtt estimates. Also, we must not reset the * backoff for rto until we get a non-retransmitted * packet. This allows us to deal with a situation * where the network delay has increased suddenly. * I.e. Karn's algorithm. (SIGCOMM '87, p5.) */ if (flag & FLAG_RETRANS_DATA_ACKED) return; tcp_valid_rtt_meas(sk, seq_rtt); } static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, const s32 seq_rtt) { const struct tcp_sock *tp = tcp_sk(sk); /* Note that peer MAY send zero echo. In this case it is ignored. 
(rfc1323) */ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tcp_ack_saw_tstamp(sk, flag); else if (seq_rtt >= 0) tcp_ack_no_tstamp(sk, seq_rtt, flag); } static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { const struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; } /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ static void tcp_rearm_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); } } /* If we get here, the whole TSO packet has not been acked. */ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { BUG_ON(tcp_skb_pcount(skb) == 0); BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } return packets_acked; } /* Remove acknowledged frames from the retransmission queue. If our packet * is before the ack sequence we can discard it as it's confirmed to have * arrived at the other end. */ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; u32 now = tcp_time_stamp; int fully_acked = 1; int flag = 0; u32 pkts_acked = 0; u32 reord = tp->packets_out; u32 prior_sacked = tp->sacked_out; s32 seq_rtt = -1; s32 ca_seq_rtt = -1; ktime_t last_ackt = net_invalid_timestamp(); while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); u32 acked_pcount; u8 sacked = scb->sacked; /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq)) break; acked_pcount = tcp_tso_acked(sk, skb); if (!acked_pcount) break; fully_acked = 0; } else { acked_pcount = tcp_skb_pcount(skb); } if (sacked & TCPCB_RETRANS) { if (sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= acked_pcount; flag |= FLAG_RETRANS_DATA_ACKED; ca_seq_rtt = -1; seq_rtt = -1; if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) flag |= FLAG_NONHEAD_RETRANS_ACKED; } else { ca_seq_rtt = now - scb->when; last_ackt = skb->tstamp; if (seq_rtt < 0) { seq_rtt = ca_seq_rtt; } if (!(sacked & TCPCB_SACKED_ACKED)) reord = min(pkts_acked, reord); } if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= acked_pcount; if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; tp->packets_out -= acked_pcount; pkts_acked += acked_pcount; /* Initial outgoing SYN's get put onto the write_queue * just like anything else we transmit. It is not * true data, and if we misinform our callers that * this ACK acks real data, we will erroneously exit * connection startup slow start one packet too * quickly. This is severely frowned upon behavior. 
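 * Hence the TCPHDR_SYN check below: only non-SYN segments set
 * FLAG_DATA_ACKED, while an ACKed SYN sets FLAG_SYN_ACKED and clears
 * retrans_stamp.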
*/ if (!(scb->tcp_flags & TCPHDR_SYN)) { flag |= FLAG_DATA_ACKED; } else { flag |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; } if (!fully_acked) break; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); tp->scoreboard_skb_hint = NULL; if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = NULL; if (skb == tp->lost_skb_hint) tp->lost_skb_hint = NULL; } if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una; if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) flag |= FLAG_SACK_RENEGING; if (flag & FLAG_ACKED) { const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; if (unlikely(icsk->icsk_mtup.probe_size && !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { tcp_mtup_probe_success(sk); } tcp_ack_update_rtt(sk, flag, seq_rtt); tcp_rearm_rto(sk); if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); } else { int delta; /* Non-retransmitted hole got filled? That's reordering */ if (reord < prior_fackets) tcp_update_reordering(sk, tp->fackets_out - reord, 0); delta = tcp_is_fack(tp) ? pkts_acked : prior_sacked - tp->sacked_out; tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); } tp->fackets_out -= min(pkts_acked, tp->fackets_out); if (ca_ops->pkts_acked) { s32 rtt_us = -1; /* Is the ACK triggering packet unambiguous? */ if (!(flag & FLAG_RETRANS_DATA_ACKED)) { /* High resolution needed and available? */ if (ca_ops->flags & TCP_CONG_RTT_STAMP && !ktime_equal(last_ackt, net_invalid_timestamp())) rtt_us = ktime_us_delta(ktime_get_real(), last_ackt); else if (ca_seq_rtt >= 0) rtt_us = jiffies_to_usecs(ca_seq_rtt); } ca_ops->pkts_acked(sk, pkts_acked, rtt_us); } } #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); WARN_ON((int)tp->lost_out < 0); WARN_ON((int)tp->retrans_out < 0); if (!tp->packets_out && tcp_is_sack(tp)) { icsk = inet_csk(sk); if (tp->lost_out) { printk(KERN_DEBUG "Leak l=%u %d\n", tp->lost_out, icsk->icsk_ca_state); tp->lost_out = 0; } if (tp->sacked_out) { printk(KERN_DEBUG "Leak s=%u %d\n", tp->sacked_out, icsk->icsk_ca_state); tp->sacked_out = 0; } if (tp->retrans_out) { printk(KERN_DEBUG "Leak r=%u %d\n", tp->retrans_out, icsk->icsk_ca_state); tp->retrans_out = 0; } } #endif return flag; } static void tcp_ack_probe(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* Was it a usable window open? */ if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { icsk->icsk_backoff = 0; inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); /* Socket must be waked up by subsequent tcp_data_snd_check(). * This function is not for random using! */ } else { inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), TCP_RTO_MAX); } } static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) { return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open; } static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) { const struct tcp_sock *tp = tcp_sk(sk); return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); } /* Check that window update is acceptable. * The function assumes that snd_una<=ack<=snd_next. 
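 * A window update is accepted when the ACK advances snd_una, carries a newer
 * sequence number than the last window update (snd_wl1), or repeats snd_wl1
 * while advertising a larger window.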
*/ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, const u32 ack_seq, const u32 nwin) { return after(ack, tp->snd_una) || after(ack_seq, tp->snd_wl1) || (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); } /* Update our send window. * * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 * and in FreeBSD. NetBSD's one is even worse.) is wrong. */ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, u32 ack_seq) { struct tcp_sock *tp = tcp_sk(sk); int flag = 0; u32 nwin = ntohs(tcp_hdr(skb)->window); if (likely(!tcp_hdr(skb)->syn)) nwin <<= tp->rx_opt.snd_wscale; if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { flag |= FLAG_WIN_UPDATE; tcp_update_wl(tp, ack_seq); if (tp->snd_wnd != nwin) { tp->snd_wnd = nwin; /* Note, it is the only place, where * fast path is recovered for sending TCP. */ tp->pred_flags = 0; tcp_fast_path_check(sk); if (nwin > tp->max_window) { tp->max_window = nwin; tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); } } } tp->snd_una = ack; return flag; } /* A very conservative spurious RTO response algorithm: reduce cwnd and * continue in congestion avoidance. */ static void tcp_conservative_spur_to_response(struct tcp_sock *tp) { tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); tp->snd_cwnd_cnt = 0; tp->bytes_acked = 0; TCP_ECN_queue_cwr(tp); tcp_moderate_cwnd(tp); } /* A conservative spurious RTO response algorithm: reduce cwnd using * rate halving and continue in congestion avoidance. */ static void tcp_ratehalving_spur_to_response(struct sock *sk) { tcp_enter_cwr(sk, 0); } static void tcp_undo_spur_to_response(struct sock *sk, int flag) { if (flag & FLAG_ECE) tcp_ratehalving_spur_to_response(sk); else tcp_undo_cwr(sk, true); } /* F-RTO spurious RTO detection algorithm (RFC4138) * * F-RTO affects during two new ACKs following RTO (well, almost, see inline * comments). State (ACK number) is kept in frto_counter. When ACK advances * window (but not to or beyond highest sequence sent before RTO): * On First ACK, send two new segments out. * On Second ACK, RTO was likely spurious. Do spurious response (response * algorithm is not part of the F-RTO detection algorithm * given in RFC4138 but can be selected separately). * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss * and TCP falls back to conventional RTO recovery. F-RTO allows overriding * of Nagle, this is done using frto_counter states 2 and 3, when a new data * segment of any size sent during F-RTO, state 2 is upgraded to 3. * * Rationale: if the RTO was spurious, new ACKs should arrive from the * original window even after we transmit two new data segments. * * SACK version: * on first step, wait until first cumulative ACK arrives, then move to * the second step. In second step, the next ACK decides. * * F-RTO is implemented (mainly) in four functions: * - tcp_use_frto() is used to determine if TCP is can use F-RTO * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is * called when tcp_use_frto() showed green light * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm * - tcp_enter_frto_loss() is called if there is not enough evidence * to prove that the RTO is indeed spurious. 
It transfers the control * from F-RTO to the conventional RTO recovery */ static int tcp_process_frto(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); /* Duplicate the behavior from Loss state (fastretrans_alert) */ if (flag & FLAG_DATA_ACKED) inet_csk(sk)->icsk_retransmits = 0; if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) tp->undo_marker = 0; if (!before(tp->snd_una, tp->frto_highmark)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); return 1; } if (!tcp_is_sackfrto(tp)) { /* RFC4138 shortcoming in step 2; should also have case c): * ACK isn't duplicate nor advances window, e.g., opposite dir * data, winupdate */ if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) return 1; if (!(flag & FLAG_DATA_ACKED)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), flag); return 1; } } else { if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { /* Prevent sending of new data. */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)); return 1; } if ((tp->frto_counter >= 2) && (!(flag & FLAG_FORWARD_PROGRESS) || ((flag & FLAG_DATA_SACKED) && !(flag & FLAG_ONLY_ORIG_SACKED)))) { /* RFC4138 shortcoming (see comment above) */ if (!(flag & FLAG_FORWARD_PROGRESS) && (flag & FLAG_NOT_DUP)) return 1; tcp_enter_frto_loss(sk, 3, flag); return 1; } } if (tp->frto_counter == 1) { /* tcp_may_send_now needs to see updated state */ tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; tp->frto_counter = 2; if (!tcp_may_send_now(sk)) tcp_enter_frto_loss(sk, 2, flag); return 1; } else { switch (sysctl_tcp_frto_response) { case 2: tcp_undo_spur_to_response(sk, flag); break; case 1: tcp_conservative_spur_to_response(tp); break; default: tcp_ratehalving_spur_to_response(sk); break; } tp->frto_counter = 0; tp->undo_marker = 0; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); } return 0; } /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; u32 prior_in_flight; u32 prior_fackets; int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; int newly_acked_sacked = 0; int frto_cwnd = 0; /* If the ack is older than previous acks * then we can probably ignore it. */ if (before(ack, prior_snd_una)) goto old_ack; /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). */ if (after(ack, tp->snd_nxt)) goto invalid_ack; if (after(ack, prior_snd_una)) flag |= FLAG_SND_UNA_ADVANCED; if (sysctl_tcp_abc) { if (icsk->icsk_ca_state < TCP_CA_CWR) tp->bytes_acked += ack - prior_snd_una; else if (icsk->icsk_ca_state == TCP_CA_Loss) /* we assume just one segment left network */ tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache); } prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. * Note, we use the fact that SND.UNA>=SND.WL2. 
*/ tcp_update_wl(tp, ack_seq); tp->snd_una = ack; flag |= FLAG_WIN_UPDATE; tcp_ca_event(sk, CA_EVENT_FAST_ACK); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; tcp_ca_event(sk, CA_EVENT_SLOW_ACK); } /* We passed data and got it acked, remove any soft error * log. Something worked... */ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; prior_packets = tp->packets_out; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. */ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = prior_packets - tp->packets_out; newly_acked_sacked = (prior_packets - prior_sacked) - (tp->packets_out - tp->sacked_out); if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); /* Guarantee sacktag reordering detection against wrap-arounds */ if (before(tp->frto_highmark, tp->snd_una)) tp->frto_highmark = 0; if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. */ if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight); } if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) dst_confirm(__sk_dst_get(sk)); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. */ if (tcp_send_head(sk)) tcp_ack_probe(sk); return 1; invalid_ack: SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return -1; old_ack: /* If data was SACKed, tag it and see if we should send more data. * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); newly_acked_sacked = tp->sacked_out - prior_sacked; tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return 0; } /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when * the fast version below fails. 
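 * Options are handled as (kind, length) pairs: EOL terminates parsing, NOP is
 * a single filler byte, and any other option must carry a length byte with
 * 2 <= opsize <= remaining option space, otherwise parsing stops.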
*/ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, const u8 **hvpp, int estab) { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); int length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); opt_rx->saw_tstamp = 0; while (length > 0) { int opcode = *ptr++; int opsize; switch (opcode) { case TCPOPT_EOL: return; case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ length--; continue; default: opsize = *ptr++; if (opsize < 2) /* "silly options" */ return; if (opsize > length) return; /* don't parse partial options */ switch (opcode) { case TCPOPT_MSS: if (opsize == TCPOLEN_MSS && th->syn && !estab) { u16 in_mss = get_unaligned_be16(ptr); if (in_mss) { if (opt_rx->user_mss && opt_rx->user_mss < in_mss) in_mss = opt_rx->user_mss; opt_rx->mss_clamp = in_mss; } } break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > 14) { if (net_ratelimit()) printk(KERN_INFO "tcp_parse_options: Illegal window " "scaling value %d >14 received.\n", snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; } break; case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || (!estab && sysctl_tcp_timestamps))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); } break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && !estab && sysctl_tcp_sack) { opt_rx->sack_ok = 1; tcp_sack_reset(opt_rx); } break; case TCPOPT_SACK: if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } break; #ifdef CONFIG_TCP_MD5SIG case TCPOPT_MD5SIG: /* * The MD5 Hash has already been * checked (see tcp_v{4,6}_do_rcv()). */ break; #endif case TCPOPT_COOKIE: /* This option is variable length. */ switch (opsize) { case TCPOLEN_COOKIE_BASE: /* not yet implemented */ break; case TCPOLEN_COOKIE_PAIR: /* not yet implemented */ break; case TCPOLEN_COOKIE_MIN+0: case TCPOLEN_COOKIE_MIN+2: case TCPOLEN_COOKIE_MIN+4: case TCPOLEN_COOKIE_MIN+6: case TCPOLEN_COOKIE_MAX: /* 16-bit multiple */ opt_rx->cookie_plus = opsize; *hvpp = ptr; break; default: /* ignore option */ break; } break; } ptr += opsize-2; length -= opsize; } } } EXPORT_SYMBOL(tcp_parse_options); static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) { const __be32 *ptr = (const __be32 *)(th + 1); if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { tp->rx_opt.saw_tstamp = 1; ++ptr; tp->rx_opt.rcv_tsval = ntohl(*ptr); ++ptr; tp->rx_opt.rcv_tsecr = ntohl(*ptr); return 1; } return 0; } /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ static int tcp_fast_parse_options(const struct sk_buff *skb, const struct tcphdr *th, struct tcp_sock *tp, const u8 **hvpp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. 
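 * E.g. a bare header has doff == sizeof(struct tcphdr) / 4 == 5, while a
 * header carrying only the aligned NOP/NOP/TIMESTAMP block has
 * doff == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) / 4 == 8.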
*/ if (th->doff == (sizeof(*th) / 4)) { tp->rx_opt.saw_tstamp = 0; return 0; } else if (tp->rx_opt.tstamp_ok && th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { if (tcp_parse_aligned_timestamp(tp, th)) return 1; } tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); return 1; } #ifdef CONFIG_TCP_MD5SIG /* * Parse MD5 Signature option */ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) { int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); /* If the TCP option is too short, we can short cut */ if (length < TCPOLEN_MD5SIG) return NULL; while (length > 0) { int opcode = *ptr++; int opsize; switch(opcode) { case TCPOPT_EOL: return NULL; case TCPOPT_NOP: length--; continue; default: opsize = *ptr++; if (opsize < 2 || opsize > length) return NULL; if (opcode == TCPOPT_MD5SIG) return opsize == TCPOLEN_MD5SIG ? ptr : NULL; } ptr += opsize - 2; length -= opsize; } return NULL; } EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif static inline void tcp_store_ts_recent(struct tcp_sock *tp) { tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; tp->rx_opt.ts_recent_stamp = get_seconds(); } static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) { if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { /* PAWS bug workaround wrt. ACK frames, the PAWS discard * extra check below makes sure this can only happen * for pure ACK frames. -DaveM * * Not only, also it occurs for expired timestamps. */ if (tcp_paws_check(&tp->rx_opt, 0)) tcp_store_ts_recent(tp); } } /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) * it can pass through stack. So, the following predicate verifies that * this segment is not used for anything but congestion avoidance or * fast retransmit. Moreover, we even are able to eliminate most of such * second order effects, if we apply some small "replay" window (~RTO) * to timestamp space. * * All these measures still do not guarantee that we reject wrapped ACKs * on networks with high bandwidth, when sequence space is recycled fastly, * but it guarantees that such events will be very rare and do not affect * connection seriously. This doesn't look nice, but alas, PAWS is really * buggy extension. * * [ Later note. Even worse! It is buggy for segments _with_ data. RFC * states that events when retransmit arrives after original data are rare. * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is * the biggest problem on large power networks even with minor reordering. * OK, let's give it small replay window. If peer clock is even 1hz, it is safe * up to bandwidth of 18Gigabit/sec. 8) ] */ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); u32 seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; return (/* 1. Pure ACK with correct sequence number. */ (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && /* 2. ... and duplicate ACK. */ ack == tp->snd_una && /* 3. ... and does not update window. */ !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && /* 4. ... and sits in replay window. 
*/ (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); } static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) { const struct tcp_sock *tp = tcp_sk(sk); return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && !tcp_disordered_ack(sk, skb); } /* Check segment sequence number for validity. * * Segment controls are considered valid, if the segment * fits to the window after truncation to the window. Acceptability * of data (and SYN, FIN, of course) is checked separately. * See tcp_data_queue(), for example. * * Also, controls (RST is main one) are accepted using RCV.WUP instead * of RCV.NXT. Peer still did not advance his SND.UNA when we * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. * (borrowed from freebsd) */ static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); } /* When we get a reset we do this. */ static void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { case TCP_SYN_SENT: sk->sk_err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: sk->sk_err = EPIPE; break; case TCP_CLOSE: return; default: sk->sk_err = ECONNRESET; } /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb(); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); tcp_done(sk); } /* * Process the FIN bit. This now behaves as it is supposed to work * and the FIN takes effect when it is validly part of sequence * space. Not before when we get holes. * * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT * (and thence onto LAST-ACK and finally, CLOSE, we never enter * TIME-WAIT) * * If we are in FINWAIT-1, a received FIN indicates simultaneous * close and we go into CLOSING (and later onto TIME-WAIT) * * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. */ static void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { case TCP_SYN_RECV: case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); inet_csk(sk)->icsk_ack.pingpong = 1; break; case TCP_CLOSE_WAIT: case TCP_CLOSING: /* Received a retransmission of the FIN, do * nothing. */ break; case TCP_LAST_ACK: /* RFC793: Remain in the LAST-ACK state. */ break; case TCP_FIN_WAIT1: /* This case occurs when a simultaneous close * happens, we must ack the received FIN and * enter the CLOSING state. */ tcp_send_ack(sk); tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: /* Received a FIN -- send ACK and enter TIME_WAIT. */ tcp_send_ack(sk); tcp_time_wait(sk, TCP_TIME_WAIT, 0); break; default: /* Only TCP_LISTEN and TCP_CLOSE are left, in these * cases we should never reach this piece of code. */ printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", __func__, sk->sk_state); break; } /* It _is_ possible, that we have something out-of-order _after_ FIN. * Probably, we should reset in this case. For now drop them. */ __skb_queue_purge(&tp->out_of_order_queue); if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); /* Do not send POLL_HUP for half duplex close. 
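 * I.e. wake with POLL_HUP only when both directions are shut down or the
 * socket is fully closed; a FIN with our send side still open only signals
 * POLL_IN.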
*/ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } } static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) { if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { if (before(seq, sp->start_seq)) sp->start_seq = seq; if (after(end_seq, sp->end_seq)) sp->end_seq = end_seq; return 1; } return 0; } static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { int mib_idx; if (before(seq, tp->rcv_nxt)) mib_idx = LINUX_MIB_TCPDSACKOLDSENT; else mib_idx = LINUX_MIB_TCPDSACKOFOSENT; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; tp->duplicate_sack[0].end_seq = end_seq; } } static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->rx_opt.dsack) tcp_dsack_set(sk, seq, end_seq); else tcp_sack_extend(tp->duplicate_sack, seq, end_seq); } static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) end_seq = tp->rcv_nxt; tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); } } tcp_send_ack(sk); } /* These routines update the SACK block as out-of-order packets arrive or * in-order packets close up the sequence space. */ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) { int this_sack; struct tcp_sack_block *sp = &tp->selective_acks[0]; struct tcp_sack_block *swalk = sp + 1; /* See if the recent change to the first SACK eats into * or hits the sequence space of other SACK blocks, if so coalesce. */ for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { int i; /* Zap SWALK, by moving every further SACK up by one slot. * Decrease num_sacks. */ tp->rx_opt.num_sacks--; for (i = this_sack; i < tp->rx_opt.num_sacks; i++) sp[i] = sp[i + 1]; continue; } this_sack++, swalk++; } } static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_sack_block *sp = &tp->selective_acks[0]; int cur_sacks = tp->rx_opt.num_sacks; int this_sack; if (!cur_sacks) goto new_sack; for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { if (tcp_sack_extend(sp, seq, end_seq)) { /* Rotate this_sack to the first one. */ for (; this_sack > 0; this_sack--, sp--) swap(*sp, *(sp - 1)); if (cur_sacks > 1) tcp_sack_maybe_coalesce(tp); return; } } /* Could not find an adjacent existing SACK, build a new one, * put it at the front, and shift everyone else down. We * always know there is at least one SACK present already here. * * If the sack array is full, forget about the last one. */ if (this_sack >= TCP_NUM_SACKS) { this_sack--; tp->rx_opt.num_sacks--; sp--; } for (; this_sack > 0; this_sack--, sp--) *sp = *(sp - 1); new_sack: /* Build the new head SACK, and we're done. */ sp->start_seq = seq; sp->end_seq = end_seq; tp->rx_opt.num_sacks++; } /* RCV.NXT advances, some SACKs should be eaten. 
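 * "Eaten" means: every SACK block whose start is already covered by RCV.NXT
 * is dropped from selective_acks[], and an empty out-of-order queue clears
 * all of them.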
*/ static void tcp_sack_remove(struct tcp_sock *tp) { struct tcp_sack_block *sp = &tp->selective_acks[0]; int num_sacks = tp->rx_opt.num_sacks; int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ if (skb_queue_empty(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; return; } for (this_sack = 0; this_sack < num_sacks;) { /* Check if the start of the sack is covered by RCV.NXT. */ if (!before(tp->rcv_nxt, sp->start_seq)) { int i; /* RCV.NXT must cover all the block! */ WARN_ON(before(tp->rcv_nxt, sp->end_seq)); /* Zap this SACK, by moving forward any other SACKS. */ for (i=this_sack+1; i < num_sacks; i++) tp->selective_acks[i-1] = tp->selective_acks[i]; num_sacks--; continue; } this_sack++; sp++; } tp->rx_opt.num_sacks = num_sacks; } /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; struct sk_buff *skb; while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { __u32 dsack = dsack_high; if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) dsack_high = TCP_SKB_CB(skb)->end_seq; tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); __skb_unlink(skb, &tp->out_of_order_queue); __kfree_skb(skb); continue; } SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); __skb_unlink(skb, &tp->out_of_order_queue); __skb_queue_tail(&sk->sk_receive_queue, skb); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tcp_hdr(skb)->fin) tcp_fin(sk); } } static int tcp_prune_ofo_queue(struct sock *sk); static int tcp_prune_queue(struct sock *sk); static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) { if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, size)) { if (tcp_prune_queue(sk) < 0) return -1; if (!sk_rmem_schedule(sk, size)) { if (!tcp_prune_ofo_queue(sk)) return -1; if (!sk_rmem_schedule(sk, size)) return -1; } } return 0; } static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) goto drop; skb_dst_drop(skb); __skb_pull(skb, th->doff * 4); TCP_ECN_accept_cwr(tp, skb); tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. * Packets in sequence go to the receive queue. * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { if (tcp_receive_window(tp) == 0) goto out_of_window; /* Ok. In sequence. In window. 
*/ if (tp->ucopy.task == current && tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && sock_owned_by_user(sk) && !tp->urg_data) { int chunk = min_t(unsigned int, skb->len, tp->ucopy.len); __set_current_state(TASK_RUNNING); local_bh_enable(); if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; eaten = (chunk == skb->len); tcp_rcv_space_adjust(sk); } local_bh_disable(); } if (eaten <= 0) { queue_and_out: if (eaten < 0 && tcp_try_rmem_schedule(sk, skb->truesize)) goto drop; skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) tcp_event_data_recv(sk, skb); if (th->fin) tcp_fin(sk); if (!skb_queue_empty(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. */ if (skb_queue_empty(&tp->out_of_order_queue)) inet_csk(sk)->icsk_ack.pingpong = 0; } if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); tcp_fast_path_check(sk); if (eaten > 0) __kfree_skb(skb); else if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { /* A retransmit, 2nd most common case. Force an immediate ack. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); out_of_window: tcp_enter_quickack_mode(sk); inet_csk_schedule_ack(sk); drop: __kfree_skb(skb); return; } /* Out of window. F.e. zero window probe. */ if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) goto out_of_window; tcp_enter_quickack_mode(sk); if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. */ if (!tcp_receive_window(tp)) goto out_of_window; goto queue_and_out; } TCP_ECN_check_ce(tp, skb); if (tcp_try_rmem_schedule(sk, skb->truesize)) goto drop; /* Disable header prediction. */ tp->pred_flags = 0; inet_csk_schedule_ack(sk); SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); skb_set_owner_r(skb, sk); if (!skb_peek(&tp->out_of_order_queue)) { /* Initial out of order segment, build 1 SACK. */ if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq; } __skb_queue_head(&tp->out_of_order_queue, skb); } else { struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue); u32 seq = TCP_SKB_CB(skb)->seq; u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (seq == TCP_SKB_CB(skb1)->end_seq) { __skb_queue_after(&tp->out_of_order_queue, skb1, skb); if (!tp->rx_opt.num_sacks || tp->selective_acks[0].end_seq != seq) goto add_sack; /* Common case: data arrive in order after hole. */ tp->selective_acks[0].end_seq = end_seq; return; } /* Find place to insert this segment. */ while (1) { if (!after(TCP_SKB_CB(skb1)->seq, seq)) break; if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { skb1 = NULL; break; } skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); } /* Do skb overlap to previous one? */ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. 
Drop. */ __kfree_skb(skb); tcp_dsack_set(sk, seq, end_seq); goto add_sack; } if (after(seq, TCP_SKB_CB(skb1)->seq)) { /* Partial overlap. */ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); } else { if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) skb1 = NULL; else skb1 = skb_queue_prev( &tp->out_of_order_queue, skb1); } } if (!skb1) __skb_queue_head(&tp->out_of_order_queue, skb); else __skb_queue_after(&tp->out_of_order_queue, skb1, skb); /* And clean segments covered by new one as whole. */ while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { skb1 = skb_queue_next(&tp->out_of_order_queue, skb); if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) break; if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, end_seq); break; } __skb_unlink(skb1, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); __kfree_skb(skb1); } add_sack: if (tcp_is_sack(tp)) tcp_sack_new_ofo_skb(sk, seq, end_seq); } } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next = NULL; if (!skb_queue_is_last(list, skb)) next = skb_queue_next(list, skb); __skb_unlink(skb, list); __kfree_skb(skb); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); return next; } /* Collapse contiguous sequence of skbs head..tail with * sequence numbers start..end. * * If tail is NULL, this means until the end of the list. * * Segments with FIN/SYN are not collapsed (only because this * simplifies code) */ static void tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) { struct sk_buff *skb, *n; bool end_of_skbs; /* First, check that queue is collapsible and find * the point where collapsing can be useful. */ skb = head; restart: end_of_skbs = true; skb_queue_walk_from_safe(list, skb, n) { if (skb == tail) break; /* No new bits? It is possible on ofo queue. */ if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb) break; goto restart; } /* The first skb to collapse is: * - not SYN/FIN and * - bloated or contains data before "start" or * overlaps to the next one. */ if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; break; } if (!skb_queue_is_last(list, skb)) { struct sk_buff *next = skb_queue_next(list, skb); if (next != tail && TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { end_of_skbs = false; break; } } /* Decided to skip this, advance start seq. */ start = TCP_SKB_CB(skb)->end_seq; } if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) return; while (before(start, end)) { struct sk_buff *nskb; unsigned int header = skb_headroom(skb); int copy = SKB_MAX_ORDER(header, 0); /* Too big header? This can happen with IPv6. */ if (copy < 0) return; if (end - start < copy) copy = end - start; nskb = alloc_skb(copy + header, GFP_ATOMIC); if (!nskb) return; skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); skb_set_network_header(nskb, (skb_network_header(skb) - skb->head)); skb_set_transport_header(nskb, (skb_transport_header(skb) - skb->head)); skb_reserve(nskb, header); memcpy(nskb->head, skb->head, header); memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); skb_set_owner_r(nskb, sk); /* Copy data, releasing collapsed skbs. 
*/ while (copy > 0) { int offset = start - TCP_SKB_CB(skb)->seq; int size = TCP_SKB_CB(skb)->end_seq - start; BUG_ON(offset < 0); if (size > 0) { size = min(copy, size); if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) BUG(); TCP_SKB_CB(nskb)->end_seq += size; copy -= size; start += size; } if (!before(start, TCP_SKB_CB(skb)->end_seq)) { skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) return; } } } } /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs * and tcp_collapse() them until all the queue is collapsed. */ static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); struct sk_buff *head; u32 start, end; if (skb == NULL) return; start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; head = skb; for (;;) { struct sk_buff *next = NULL; if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) next = skb_queue_next(&tp->out_of_order_queue, skb); skb = next; /* Segment is terminated when we see gap or when * we are at the end of all the queue. */ if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end); head = skb; if (!skb) break; /* Start new segment */ start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; } else { if (before(TCP_SKB_CB(skb)->seq, start)) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) end = TCP_SKB_CB(skb)->end_seq; } } } /* * Purge the out-of-order queue. * Return true if queue was pruned. */ static int tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int res = 0; if (!skb_queue_empty(&tp->out_of_order_queue)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. A conforming SACK implementation will * do the same at a timeout based retransmit. When a connection * is in a sad state like this, we care only about integrity * of the connection not performance. */ if (tp->rx_opt.sack_ok) tcp_sack_reset(&tp->rx_opt); sk_mem_reclaim(sk); res = 1; } return res; } /* Reduce allocated memory if we can, trying to get * the socket within its memory limits again. * * Return less than zero if we should start dropping frames * until the socket owning process reads some of the data * to stabilize the situation. */ static int tcp_prune_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) tcp_clamp_window(sk); else if (tcp_memory_pressure) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, skb_peek(&sk->sk_receive_queue), NULL, tp->copied_seq, tp->rcv_nxt); sk_mem_reclaim(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* Collapsing did not help, destructive actions follow. * This must not ever occur. */ tcp_prune_ofo_queue(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) return 0; /* If we are really being abused, tell the caller to silently * drop receive data on the floor. It will get retransmitted * and hopefully then we'll have sufficient space. */ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. 
*/ tp->pred_flags = 0; return -1; } /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. * As additional protections, we do not touch cwnd in retransmission phases, * and if application hit its sndbuf limit recently. */ void tcp_cwnd_application_limited(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { /* Limited by application or receiver window. */ u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); u32 win_used = max(tp->snd_cwnd_used, init_win); if (win_used < tp->snd_cwnd) { tp->snd_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; } tp->snd_cwnd_used = 0; } tp->snd_cwnd_stamp = tcp_time_stamp; } static int tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* If the user specified a specific send buffer setting, do * not modify it. */ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) return 0; /* If we are under global TCP memory pressure, do not expand. */ if (tcp_memory_pressure) return 0; /* If we are under soft global TCP memory pressure, do not expand. */ if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) return 0; /* If we filled the congestion window, do not expand. */ if (tp->packets_out >= tp->snd_cwnd) return 0; return 1; } /* When incoming ACK allowed to free some skb from write_queue, * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket * on the exit from tcp input handler. * * PROBLEM: sndbuf expansion does not work well with largesend. */ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_should_expand_sndbuf(sk)) { int sndmem = SKB_TRUESIZE(max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER); int demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering + 1); sndmem *= 2 * demanded; if (sndmem > sk->sk_sndbuf) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); tp->snd_cwnd_stamp = tcp_time_stamp; } sk->sk_write_space(sk); } static void tcp_check_space(struct sock *sk) { if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) tcp_new_space(sk); } } static inline void tcp_data_snd_check(struct sock *sk) { tcp_push_pending_frames(sk); tcp_check_space(sk); } /* * Check if sending an ack is needed. */ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... */ __tcp_select_window(sk) >= tp->rcv_wnd) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out of order data. */ (ofo_possible && skb_peek(&tp->out_of_order_queue))) { /* Then ack it now */ tcp_send_ack(sk); } else { /* Else, send delayed ack. */ tcp_send_delayed_ack(sk); } } static inline void tcp_ack_snd_check(struct sock *sk) { if (!inet_csk_ack_scheduled(sk)) { /* We sent a data segment already. */ return; } __tcp_ack_snd_check(sk, 1); } /* * This routine is only called when we have urgent data * signaled. Its the 'slow' part of tcp_urg. It could be * moved inline now as tcp_urg is only called from one * place. We handle URGent data wrong. We have to - as * BSD still doesn't use the correction from RFC961. 
* For 1003.1g we should support a new option TCP_STDURG to permit * either form (or just set the sysctl tcp_stdurg). */ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); if (ptr && !sysctl_tcp_stdurg) ptr--; ptr += ntohl(th->seq); /* Ignore urgent data that we've already seen and read. */ if (after(tp->copied_seq, ptr)) return; /* Do not replay urg ptr. * * NOTE: interesting situation not covered by specs. * Misbehaving sender may send urg ptr, pointing to segment, * which we already have in ofo queue. We are not able to fetch * such data and will stay in TCP_URG_NOTYET until will be eaten * by recvmsg(). Seems, we are not obliged to handle such wicked * situations. But it is worth to think about possibility of some * DoSes using some hypothetical application level deadlock. */ if (before(ptr, tp->rcv_nxt)) return; /* Do we already have a newer (or duplicate) urgent pointer? */ if (tp->urg_data && !after(ptr, tp->urg_seq)) return; /* Tell the world about our new urgent pointer. */ sk_send_sigurg(sk); /* We may be adding urgent data when the last byte read was * urgent. To do this requires some care. We cannot just ignore * tp->copied_seq since we would read the last urgent byte again * as data, nor can we alter copied_seq until this data arrives * or we break the semantics of SIOCATMARK (and thus sockatmark()) * * NOTE. Double Dutch. Rendering to plain English: author of comment * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); * and expect that both A and B disappear from stream. This is _wrong_. * Though this happens in BSD with high probability, this is occasional. * Any application relying on this is buggy. Note also, that fix "works" * only in this artificial test. Insert some normal data between A and B and we will * decline of BSD again. Verdict: it is better to remove to trap * buggy users. */ if (tp->urg_seq == tp->copied_seq && tp->urg_data && !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tp->copied_seq++; if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } } tp->urg_data = TCP_URG_NOTYET; tp->urg_seq = ptr; /* Disable header prediction. */ tp->pred_flags = 0; } /* This is the 'fast' part of urgent handling. */ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ if (th->urg) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... */ if (tp->urg_data == TCP_URG_NOTYET) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; /* Is the urgent pointer pointing into this packet? 
*/ if (ptr < skb->len) { u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); tp->urg_data = TCP_URG_VALID | tmp; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); } } } static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); else err = skb_copy_and_csum_datagram_iovec(skb, hlen, tp->ucopy.iov); if (!err) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); } local_bh_disable(); return err; } static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); result = __tcp_checksum_complete(skb); local_bh_disable(); } else { result = __tcp_checksum_complete(skb); } return result; } static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } #ifdef CONFIG_NET_DMA static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int dma_cookie; int copied_early = 0; if (tp->ucopy.wakeup) return 0; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list); if (dma_cookie < 0) goto out; tp->ucopy.dma_cookie = dma_cookie; copied_early = 1; tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); if ((tp->ucopy.len == 0) || (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } } else if (chunk > 0) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } out: return copied_early; } #endif /* CONFIG_NET_DMA */ /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { const u8 *hash_location; struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp, &hash_location) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. */ } /* Step 1: check sequence number */ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { /* RFC793, page 37: "In all states except SYN-SENT, all reset * (RST) segments are validated by checking their SEQ-fields." * And page 69: "If an incoming segment is not acceptable, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ if (!th->rst) tcp_send_dupack(sk, skb); goto discard; } /* Step 2: check RST bit */ if (th->rst) { tcp_reset(sk); goto discard; } /* ts_recent update must be made after we are sure that the packet * is in window. */ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN in window. 
*/ if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); tcp_reset(sk); return -1; } return 1; discard: __kfree_skb(skb); return 0; } /* * TCP receive function for the ESTABLISHED state. * * It is split into a fast path and a slow path. The fast path is * disabled when: * - A zero window was announced from us - zero window probing * is only handled properly in the slow path. * - Out of order segments arrived. * - Urgent data is expected. * - There is no buffer space left * - Unexpected TCP flags/window values/header lengths are received * (detected by checking the TCP header against pred_flags) * - Data is sent in both directions. Fast path only supports pure senders * or pure receivers (this means either the sequence number or the ack * value must stay constant) * - Unexpected TCP option. * * When these conditions are not satisfied it drops into a standard * receive procedure patterned after RFC793 to handle all cases. * The first three cases are guaranteed by proper pred_flags setting, * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. */ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); int res; /* * Header prediction. * The code loosely follows the one in the famous * "30 instruction TCP receive" Van Jacobson mail. * * Van's trick is to deposit buffers into socket queue * on a device interrupt, to call tcp_recv function * on the receive process context and checksum and copy * the buffer to user space. smart... * * Our current scheme is not silly either but we take the * extra cost of the net_bh soft interrupt processing... * We do checksum and copy also but from device to kernel. */ tp->rx_opt.saw_tstamp = 0; /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to * turn it off (when there are holes in the receive * space for instance) * PSH flag is ignored. */ if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags * match. */ /* Check timestamp */ if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { /* No? Slow path! */ if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; /* If PAWS failed, check it more carefully in slow path */ if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) goto slow_path; /* DO NOT update ts_recent here, if checksum fails * and timestamp was corrupted part, it will result * in a hung connection since we will drop all * future packets due to the PAWS test. */ } if (len <= tcp_header_len) { /* Bulk data transfer: sender */ if (len == tcp_header_len) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); /* We know that such packets are checksummed * on entry. 
*/ tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { int eaten = 0; int copied_early = 0; if (tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len) { #ifdef CONFIG_NET_DMA if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { copied_early = 1; eaten = 1; } #endif if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) { __set_current_state(TASK_RUNNING); if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) eaten = 1; } if (eaten) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); } if (copied_early) tcp_cleanup_rbuf(sk, skb->len); } if (!eaten) { if (tcp_checksum_complete_user(sk, skb)) goto csum_error; /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); if ((int)skb->truesize > sk->sk_forward_alloc) goto step5; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ __skb_pull(skb, tcp_header_len); __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; } tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } if (!copied_early || tp->rcv_nxt != tp->rcv_wup) __tcp_ack_snd_check(sk, 0); no_ack: #ifdef CONFIG_NET_DMA if (copied_early) __skb_queue_tail(&sk->sk_async_wait_queue, skb); else #endif if (eaten) __kfree_skb(skb); else sk->sk_data_ready(sk, 0); return 0; } } slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; /* * Standard slow path. */ res = tcp_validate_incoming(sk, skb, th, 1); if (res <= 0) return -res; step5: if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. 
*/ tcp_urg(sk, skb, th); /* step 7: process the segment text */ tcp_data_queue(sk, skb); tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return 0; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; } EXPORT_SYMBOL(tcp_rcv_established); static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { const u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; int saved_clamp = tp->rx_opt.mss_clamp; tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); if (th->ack) { /* rfc793: * "If the state is SYN-SENT then * first check the ACK bit * If the ACK bit is set * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send * a reset (unless the RST bit is set, if so drop * the segment and return)" * * We do not send data with SYN, so that RFC-correct * test reduces to: */ if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) goto reset_and_undo; if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } /* Now ACK is acceptable. * * "If the RST bit is set * If the ACK was acceptable then signal the user "error: * connection reset", drop the segment, enter CLOSED state, * delete TCB, and return." */ if (th->rst) { tcp_reset(sk); goto discard; } /* rfc793: * "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." * * See note below! * --ANK(990513) */ if (!th->syn) goto discard_and_undo; /* rfc793: * "If the SYN bit is on ... * are acceptable then ... * (our SYN has been ACKed), change the connection * state to ESTABLISHED..." */ TCP_ECN_rcv_synack(tp, th); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tcp_ack(sk, skb, FLAG_SLOWPATH); /* Ok.. it's good. Set up sequence numbers and * move to established. */ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; tp->window_clamp = min(tp->window_clamp, 65535U); } if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tcp_store_ts_recent(tp); } else { tp->tcp_header_len = sizeof(struct tcphdr); } if (tcp_is_sack(tp) && sysctl_tcp_fack) tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); /* Remember, tcp_poll() does not lock socket! * Change state from SYN-SENT only after copied_seq * is initialized. */ tp->copied_seq = tp->rcv_nxt; if (cvp != NULL && cvp->cookie_pair_size > 0 && tp->rx_opt.cookie_plus > 0) { int cookie_size = tp->rx_opt.cookie_plus - TCPOLEN_COOKIE_BASE; int cookie_pair_size = cookie_size + cvp->cookie_desired; /* A cookie extension option was sent and returned. * Note that each incoming SYNACK replaces the * Responder cookie. The initial exchange is most * fragile, as protection against spoofing relies * entirely upon the sequence and timestamp (above). * This replacement strategy allows the correct pair to * pass through, while any others will be filtered via * Responder verification later. 
*/ if (sizeof(cvp->cookie_pair) >= cookie_pair_size) { memcpy(&cvp->cookie_pair[cvp->cookie_desired], hash_location, cookie_size); cvp->cookie_pair_size = cookie_pair_size; } } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); security_inet_conn_established(sk, skb); /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on first data * packet. */ tp->lsndtime = tcp_time_stamp; tcp_init_buffer_space(sk); if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); else tp->pred_flags = 0; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; icsk->icsk_ack.ato = TCP_ATO_MIN; tcp_incr_quickack(sk); tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: __kfree_skb(skb); return 0; } else { tcp_send_ack(sk); } return -1; } /* No ACK in the segment */ if (th->rst) { /* rfc793: * "If the RST bit is set * * Otherwise (no ACK) drop the segment and return." */ goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. * Particularly, it can be connect to self. */ tcp_set_state(sk, TCP_SYN_RECV); if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tcp_store_ts_recent(tp); tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { tp->tcp_header_len = sizeof(struct tcphdr); } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; TCP_ECN_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. * There are no obstacles to make this. * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. * Also, seems the code doing it in step6 of tcp_rcv_state_process * is not flawless. So, discard packet for sanity. * Uncomment this return to process the data. */ return -1; #else goto discard; #endif } /* "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." */ discard_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; goto discard; reset_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; return 1; } /* * This function implements the receiving procedure of RFC 793 for * all states except ESTABLISHED and TIME_WAIT. * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be * address independent. 
*/ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int queued = 0; int res; tp->rx_opt.saw_tstamp = 0; switch (sk->sk_state) { case TCP_CLOSE: goto discard; case TCP_LISTEN: if (th->ack) return 1; if (th->rst) goto discard; if (th->syn) { if (th->fin) goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; /* Now we have several options: In theory there is * nothing else in the frame. KA9Q has an option to * send data with the syn, BSD accepts data with the * syn up to the [to be] advertised window and * Solaris 2.1 gives you a protocol error. For now * we just ignore it, that fits the spec precisely * and avoids incompatibilities. It would be nice in * future to drop through and process the data. * * Now that TTCP is starting to be used we ought to * queue this data. * But, this leaves one open to an easy denial of * service attack, and SYN cookies can't defend * against this problem. So, we drop the data * in the interest of security over speed unless * it's still in use. */ kfree_skb(skb); return 0; } goto discard; case TCP_SYN_SENT: queued = tcp_rcv_synsent_state_process(sk, skb, th, len); if (queued >= 0) return queued; /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } res = tcp_validate_incoming(sk, skb, th, 0); if (res <= 0) return -res; /* step 5: check the ACK field */ if (th->ack) { int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: if (acceptable) { tp->copied_seq = tp->rcv_nxt; smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); /* Note, that this wakeup is only for marginal * crossed SYN case. Passively open sockets * are not waked up, because sk->sk_sleep == * NULL and sk->sk_socket == NULL. */ if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; /* Make sure socket is routed, for * correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on * first data packet. */ tp->lsndtime = tcp_time_stamp; tcp_mtup_init(sk); tcp_initialize_rcv_mss(sk); tcp_init_buffer_space(sk); tcp_fast_path_on(tp); } else { return 1; } break; case TCP_FIN_WAIT1: if (tp->snd_una == tp->write_seq) { tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; dst_confirm(__sk_dst_get(sk)); if (!sock_flag(sk, SOCK_DEAD)) /* Wake up lingering close() */ sk->sk_state_change(sk); else { int tmo; if (tp->linger2 < 0 || (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. We still can lose it now, * if it spins in bh_lock_sock(), but it is really * marginal case. 
*/ inet_csk_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto discard; } } } break; case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { tcp_time_wait(sk, TCP_TIME_WAIT, 0); goto discard; } break; case TCP_LAST_ACK: if (tp->snd_una == tp->write_seq) { tcp_update_metrics(sk); tcp_done(sk); goto discard; } break; } } else goto discard; /* step 6: check the URG bit */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ switch (sk->sk_state) { case TCP_CLOSE_WAIT: case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: /* RFC 793 says to queue data in these states, * RFC 1122 says we MUST send a reset. * BSD 4.4 also does reset. */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } } /* Fall through */ case TCP_ESTABLISHED: tcp_data_queue(sk, skb); queued = 1; break; } /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL(tcp_rcv_state_process);
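/*
 * Illustrative sketch (not part of the kernel sources above): a minimal,
 * self-contained userspace re-statement of the sequence-number acceptance
 * test used by tcp_sequence() earlier in this file.  A segment is
 * acceptable when it overlaps the receive window [RCV.WUP, RCV.NXT + win];
 * the seq_before()/seq_after() helpers below mirror the kernel's wrap-safe
 * 32-bit comparisons.  All names and sample values here are hypothetical
 * and exist only for this example.
 */
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is strictly before b" for 32-bit sequence numbers. */
static inline int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}
#define seq_after(b, a)	seq_before(a, b)

/* Same shape as tcp_sequence(): accept if the segment does not end before
 * RCV.WUP and does not start beyond the right edge of the window. */
static int seq_acceptable(uint32_t rcv_wup, uint32_t rcv_nxt, uint32_t win,
			  uint32_t seq, uint32_t end_seq)
{
	return !seq_before(end_seq, rcv_wup) &&
	       !seq_after(seq, rcv_nxt + win);
}

int main(void)
{
	/* Hypothetical connection state chosen near the 32-bit wrap point. */
	uint32_t rcv_wup = 0xfffffff0u, rcv_nxt = 0xfffffff0u, win = 0x1000;

	printf("in-window segment:  %d\n",	/* expected: 1 */
	       seq_acceptable(rcv_wup, rcv_nxt, win, 0xfffffff4u, 0x00000010u));
	printf("old duplicate:      %d\n",	/* expected: 0 */
	       seq_acceptable(rcv_wup, rcv_nxt, win, 0xffffff00u, 0xffffff80u));
	printf("beyond the window:  %d\n",	/* expected: 0 */
	       seq_acceptable(rcv_wup, rcv_nxt, win, 0x00002000u, 0x00002100u));
	return 0;
}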
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % OOO PPPP EEEE RRRR AA TTTTT III OOO N N % % O O P P E R R A A T I O O NN N % % O O PPPP EEE RRRR AAAA T I O O N N N % % O O P E R R A A T I O O N NN % % OOO P EEEE R RR A A T III OOO N N % % % % % % CLI Magick Option Methods % % % % Dragon Computing % % Anthony Thyssen % % September 2011 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Apply the given options (settings, and simple, or sequence operations) to % the given image(s) according to the current "image_info", "draw_info", and % "quantize_info" settings, stored in a special CLI Image Wand. % % The final goal is to allow the execution in a strict one option at a time % manner that is needed for 'pipelining and file scripting' of options in % IMv7. % % Anthony Thyssen, September 2011 */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/mogrify.h" #include "MagickWand/operation.h" #include "MagickWand/wand.h" #include "MagickWand/wandcli.h" #include "MagickWand/wandcli-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/image-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-private.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer-private.h" /* Constant declaration. */ static const char MogrifyAlphaColor[] = "#bdbdbd", /* slightly darker gray */ MogrifyBackgroundColor[] = "#fff", /* white */ MogrifyBorderColor[] = "#dfdfdf"; /* sRGB gray */ /* Define declarations. */ #define USE_WAND_METHODS 1 #define MAX_STACK_DEPTH 32 #define UNDEFINED_COMPRESSION_QUALITY 0UL /* FUTURE: why is this default so specific? 
*/ #define DEFAULT_DISSIMILARITY_THRESHOLD "0.31830988618379067154" /* For Debugging Geometry Input */ #define ReportGeometry(flags,info) \ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", \ flags, info.rho, info.sigma, info.xi, info.psi ) /* ** Function to report on the progress of image operations */ static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MagickPathExtent], tag[MagickPathExtent]; const char *locale_message; register char *p; magick_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MagickPathExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } /* ** GetImageCache() will read an image into a image cache if not already ** present then return the image that is in the cache under that filename. */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MagickPathExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); if (path != (const char *) NULL) (void) CopyMagickString(read_info->filename,path,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } /* SparseColorOption() parse the complex -sparse-color argument into an an array of floating point values than call SparseColorImage(). Argument is a complex mix of floating-point pixel coodinates, and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. This really should be in MagickCore, so that other API's can make use of it. 
*/ static Image *SparseColorOption(const Image *image, const SparseColorMethod method,const char *arguments,ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p; double *sparse_arguments; Image *sparse_image; PixelInfo color; MagickBooleanType error; register size_t x; size_t number_arguments, number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to image add up number of values needed per color. */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) x += number_colors; /* color argument found */ else x++; /* floating point argument */ } /* control points and color values */ if ((x % (2+number_colors)) != 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } error=MagickFalse; number_arguments=x; /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of X-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "'%s': %s", "sparse-color", "Color found, instead of Y-coord"); error=MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryColorCompliance(token,AllCompliance,&color, exception); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.red; if 
((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) sparse_arguments[x++] = QuantumScale*color.blue; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) sparse_arguments[x++] = QuantumScale*color.black; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) sparse_arguments[x++] = QuantumScale*color.alpha; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && image->alpha_trait != UndefinedPixelTrait) { while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } if (error != MagickFalse) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return((Image *) NULL); } if (number_arguments != x) { sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error"); return((Image *) NULL); } /* Call the Sparse Color Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments, exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L I S e t t i n g O p t i o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLISettingOptionInfo() applies a single settings option into a CLI wand % holding the image_info, draw_info, quantize_info structures that will be % used when processing the images. 
% % These options do no require images to be present in the CLI wand for them % to be able to be set, in which case they will generally be applied to image % that are read in later % % Options handled by this function are listed in CommandOptions[] of % "option.c" that is one of "SettingOptionFlags" option flags. % % The format of the CLISettingOptionInfo method is: % % void CLISettingOptionInfo(MagickCLI *cli_wand, % const char *option, const char *arg1, const char *arg2) % % A description of each parameter follows: % % o cli_wand: structure holding settings to be applied % % o option: The option string to be set % % o arg1, arg2: optional argument strings to the operation % arg2 is currently only used by "-limit" % */ WandPrivate void CLISettingOptionInfo(MagickCLI *cli_wand, const char *option,const char *arg1n, const char *arg2n) { ssize_t parse; /* option argument parsing (string to value table lookup) */ const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _image (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define IfSetOption (*option=='-') #define ArgBoolean IfSetOption ? MagickTrue : MagickFalse #define ArgBooleanNot IfSetOption ? MagickFalse : MagickTrue #define ArgBooleanString (IfSetOption?"true":"false") #define ArgOption(def) (IfSetOption?arg1:(const char *)(def)) assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Setting Option: %s \"%s\" \"%s\"", option,arg1n,arg2n); arg1 = arg1n, arg2 = arg2n; #if 1 #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type #endif switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { _image_info->adjoin = ArgBoolean; break; } if (LocaleCompare("affine",option+1) == 0) { CLIWandWarnReplaced("-draw 'affine ...'"); if (IfSetOption) (void) ParseAffineGeometry(arg1,&_draw_info->affine,_exception); else GetAffineMatrix(&_draw_info->affine); break; } if (LocaleCompare("antialias",option+1) == 0) { _image_info->antialias = _draw_info->stroke_antialias = _draw_info->text_antialias = ArgBoolean; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption("1.0")); break; } if (LocaleCompare("authenticate",option+1) == 0) { 
(void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'b': { if (LocaleCompare("background",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! _image_info only used directly for generating new images. SyncImageSettings() used to set per-image attribute. FUTURE: if _image_info->background_color is not set then we should fall back to per-image background_color At this time -background will 'wipe out' the per-image background color! Better error handling of QueryColorCompliance() needed. */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption(MogrifyBackgroundColor),AllCompliance, &_image_info->background_color,_exception); break; } if (LocaleCompare("bias",option+1) == 0) { /* FUTURE: bias OBSOLETED, replaced by Artifact "convolve:bias" as it is actually rarely used except in direct convolve operations Usage outside a direct convolve operation is actally non-sensible! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,"convolve:bias",ArgOption(NULL)); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { /* Used as a image chromaticity setting SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } if (LocaleCompare("blue-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined Used by many coders including PNG SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("bordercolor",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! SyncImageSettings() used to set per-image attribute. Better error checking of QueryColorCompliance(). 
*/ if (IfSetOption) { (void) SetImageOption(_image_info,option+1,arg1); (void) QueryColorCompliance(arg1,AllCompliance, &_image_info->border_color,_exception); (void) QueryColorCompliance(arg1,AllCompliance, &_draw_info->border_color,_exception); break; } (void) DeleteImageOption(_image_info,option+1); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &_image_info->border_color,_exception); (void) QueryColorCompliance(MogrifyBorderColor,AllCompliance, &_draw_info->border_color,_exception); break; } if (LocaleCompare("box",option+1) == 0) { CLIWandWarnReplaced("-undercolor"); CLISettingOptionInfo(cli_wand,"-undercolor",arg1, arg2); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType limit; if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",arg1) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(arg1,100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("colorspace",option+1) == 0) { /* Setting used for new images via AcquireImage() But also used as a SimpleImageOperator Undefined colorspace means don't modify images on read or as an operation */ parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option, arg1); _image_info->colorspace=(ColorspaceType) parse; break; } if (LocaleCompare("comment",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("compose",option+1) == 0) { /* FUTURE: _image_info should be used, SyncImageSettings() used to set per-image attribute. - REMOVE This setting should NOT be used to set image 'compose' "-layer" operators should use _image_info if defined otherwise they should use a per-image compose setting. */ parse = ParseCommandOption(MagickComposeOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedComposeOperator", option,arg1); _image_info->compose=(CompositeOperator) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("compress",option+1) == 0) { /* FUTURE: What should be used? _image_info or ImageOption ??? The former is more efficient, but Cristy prefers the latter! SyncImageSettings() used to set per-image attribute. The coders appear to use _image_info, not Image_Option however the image attribute (for save) is set from the ImageOption! Note that "undefined" is a different setting to "none". */ parse = ParseCommandOption(MagickCompressOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageCompression", option,arg1); _image_info->compression=(CompressionType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("debug",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute.
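The argument is a log event mask handed to SetLogEventMask(), for example "-debug coder" or "-debug all"; "+debug" resets the mask to "none".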
*/ arg1=ArgOption("none"); parse = ParseCommandOption(MagickLogEventOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEventType", option,arg1); (void) SetLogEventMask(arg1); _image_info->debug=IsEventLogging(); /* extract logging*/ cli_wand->wand.debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (LocaleNCompare(arg1,"registry:",9) == 0) { if (IfSetOption) (void) DefineImageRegistry(StringRegistryType,arg1+9,_exception); else (void) DeleteImageRegistry(arg1+9); break; } /* DefineImageOption() equals SetImageOption() but with '=' */ if (IfSetOption) (void) DefineImageOption(_image_info,arg1); else if (DeleteImageOption(_image_info,arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"NoSuchOption",option,arg1); break; } if (LocaleCompare("delay",option+1) == 0) { /* Only used for new images via AcquireImage() FUTURE: Option should also be used for "-morph" (color morphing) */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("density",option+1) == 0) { /* FUTURE: strings used in _image_info attr and _draw_info! Basically as density can be in a XxY form! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) CloneString(&_image_info->density,ArgOption(NULL)); (void) CloneString(&_draw_info->density,_image_info->density); break; } if (LocaleCompare("depth",option+1) == 0) { /* This is also a SimpleImageOperator! for 8->16 vaule trunc !!!! SyncImageSettings() used to set per-image attribute. */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->depth=IfSetOption?StringToUnsignedLong(arg1) :MAGICKCORE_QUANTUM_DEPTH; break; } if (LocaleCompare("direction",option+1) == 0) { /* Image Option is only used to set _draw_info */ arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickDirectionOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedDirectionType", option,arg1); _draw_info->direction=(DirectionType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&_image_info->server_name,ArgOption(NULL)); (void) CloneString(&_draw_info->server_name,_image_info->server_name); break; } if (LocaleCompare("dispose",option+1) == 0) { /* only used in setting new images */ arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickDisposeOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedDisposeMethod", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption("undefined")); break; } if (LocaleCompare("dissimilarity-threshold",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ arg1=ArgOption(DEFAULT_DISSIMILARITY_THRESHOLD); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("dither",option+1) == 0) { /* _image_info attr (on/off), _quantize_info attr (on/off) but also ImageInfo and _quantize_info method! 
FUTURE: merge the duality of the dithering options */ _image_info->dither = ArgBoolean; (void) SetImageOption(_image_info,option+1,ArgOption("none")); _quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,ArgOption("none")); if (_quantize_info->dither_method == NoDitherMethod) _image_info->dither = MagickFalse; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&_draw_info->encoding,ArgOption("undefined")); (void) SetImageOption(_image_info,option+1,_draw_info->encoding); break; } if (LocaleCompare("endian",option+1) == 0) { /* Both _image_info attr and ImageInfo */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickEndianOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEndianType", option,arg1); /* FUTURE: check alloc/free of endian string! - remove? */ _image_info->endian=(EndianType) (*arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("extract",option+1) == 0) { (void) CloneString(&_image_info->extract,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("family",option+1) == 0) { (void) CloneString(&_draw_info->family,ArgOption(NULL)); break; } if (LocaleCompare("features",option+1) == 0) { (void) SetImageOption(_image_info,"identify:features", ArgBooleanString); if (IfSetOption) (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { /* Set "fill" OR "fill-pattern" in _draw_info The original fill color is preserved if a fill-pattern is given. That way it does not effect other operations that directly using the fill color and, can be retored using "+tile". */ MagickBooleanType status; ExceptionInfo *sans; PixelInfo color; arg1 = ArgOption("none"); /* +fill turns it off! */ (void) SetImageOption(_image_info,option+1,arg1); if (_draw_info->fill_pattern != (Image *) NULL) _draw_info->fill_pattern=DestroyImage(_draw_info->fill_pattern); /* is it a color or a image? -- ignore exceptions */ sans=AcquireExceptionInfo(); status=QueryColorCompliance(arg1,AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) _draw_info->fill_pattern=GetImageCache(_image_info,arg1,_exception); else _draw_info->fill=color; break; } if (LocaleCompare("filter",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickFilterOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageFilter", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("font",option+1) == 0) { (void) CloneString(&_draw_info->font,ArgOption(NULL)); (void) CloneString(&_image_info->font,_draw_info->font); break; } if (LocaleCompare("format",option+1) == 0) { /* FUTURE: why the ping test, you could set ping after this! */ /* register const char *q; for (q=strchr(arg1,'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) _image_info->ping=MagickFalse; */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("fuzz",option+1) == 0) { /* Option used to set image fuzz! unless blank canvas (from color) Image attribute used for color compare operations SyncImageSettings() used to set per-image attribute. 
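The value may be given as an absolute quantum value or as a percentage of the quantum range, for example "-fuzz 10%".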
FUTURE: Can't find anything else using _image_info->fuzz directly! convert structure attribute to 'option' string */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->fuzz=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("none"); parse = ParseCommandOption(MagickGravityOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedGravityType", option,arg1); _draw_info->gravity=(GravityType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("green-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined SyncImageSettings() used to set per-image attribute. Used directly by many coders */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,"compare:highlight-color", ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityType", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("intent",option+1) == 0) { /* Only used by coders: MIFF, MPC, BMP, PNG and for image profile call to AcquireTransformThreadSet() SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickIntentOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntentType", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interlace",option+1) == 0) { /* _image_info is directly used by coders (so why an image setting?) SyncImageSettings() used to set per-image attribute. */ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickInterlaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedInterlaceType", option,arg1); _image_info->interlace=(InterlaceType) parse; (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); _draw_info->interline_spacing=StringToDouble(ArgOption("0"), (char **) NULL); break; } if (LocaleCompare("interpolate",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. 
*/ arg1 = ArgOption("undefined"); parse = ParseCommandOption(MagickInterpolateOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedInterpolateMethod", option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); _draw_info->interword_spacing=StringToDouble(ArgOption("0"),(char **) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _draw_info->kerning=StringToDouble(ArgOption("0"),(char **) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("label",option+1) == 0) { /* only used for new images - not in SyncImageOptions() */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; limit=MagickResourceInfinity; parse= ParseCommandOption(MagickResourceOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedResourceType", option,arg1); if (LocaleCompare("unlimited",arg2) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(arg2,100.0); (void) SetMagickResourceLimit((ResourceType)parse,limit); break; } if (LocaleCompare("log",option+1) == 0) { if (IfSetOption) { if ((strchr(arg1,'%') == (char *) NULL)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetLogFormat(arg1); } break; } if (LocaleCompare("lowlight-color",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,"compare:lowlight-color", ArgOption(NULL)); break; } if (LocaleCompare("loop",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("mattecolor",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption(MogrifyAlphaColor), AllCompliance,&_image_info->matte_color,_exception); break; } if (LocaleCompare("metric",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ parse=ParseCommandOption(MagickMetricOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedMetricType", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } if (LocaleCompare("moments",option+1) == 0) { (void) SetImageOption(_image_info,"identify:moments", ArgBooleanString); if (IfSetOption) (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(_image_info, IfSetOption? 
MonitorProgress: (MagickProgressMonitor) NULL, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { /* Setting (used by some input coders!) -- why? Warning: This is also Special '-type' SimpleOperator */ _image_info->monochrome= ArgBoolean; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'o': { if (LocaleCompare("orient",option+1) == 0) { /* Is not used when defining for new images. This makes it more of a 'operation' than a setting FUTURE: make set meta-data operator instead. SyncImageSettings() used to set per-image attribute. */ parse=ParseCommandOption(MagickOrientationOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageOrientation", option,arg1); _image_info->orientation=(OrientationType)parse; (void) SetImageOption(_image_info,option+1, ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("page",option+1) == 0) { /* Only used for new images and image generators. SyncImageSettings() used to set per-image attribute. ????? That last is WRONG!!!! FUTURE: adjust named 'page' sizes according density */ char *canonical_page, page[MagickPathExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (!IfSetOption) { (void) DeleteImageOption(_image_info,option+1); (void) CloneString(&_image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(_image_info,"page"); if (image_option != (const char *) NULL) flags=ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(arg1); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(_image_info,option+1,page); (void) CloneString(&_image_info->page,page); break; } if (LocaleCompare("ping",option+1) == 0) { _image_info->ping = ArgBoolean; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (IfSetOption) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->pointsize = _draw_info->pointsize = StringToDouble(arg1,(char **) NULL); } else { _image_info->pointsize=0.0; /* unset pointsize */ _draw_info->pointsize=12.0; } break; } if (LocaleCompare("precision",option+1) == 0) { arg1=ArgOption("-1"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetMagickPrecision(StringToInteger(arg1)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); _image_info->quality= IfSetOption ? 
StringToUnsignedLong(arg1) : UNDEFINED_COMPRESSION_QUALITY; (void) SetImageOption(_image_info,option+1,ArgOption("0")); break; } if (LocaleCompare("quantize",option+1) == 0) { /* Just a set direct in _quantize_info */ arg1=ArgOption("undefined"); parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace", option,arg1); _quantize_info->colorspace=(ColorspaceType)parse; break; } if (LocaleCompare("quiet",option+1) == 0) { /* FUTURE: if two -quiet is performed you can not do +quiet! This needs to be checked over thoughly. */ static WarningHandler warning_handler = (WarningHandler) NULL; WarningHandler tmp = SetWarningHandler((WarningHandler) NULL); if ( tmp != (WarningHandler) NULL) warning_handler = tmp; /* remember the old handler */ if (!IfSetOption) /* set the old handler */ warning_handler=SetWarningHandler(warning_handler); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { /* Image chromaticity X,Y NB: Y=X if Y not defined Used by many coders SyncImageSettings() used to set per-image attribute. */ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("regard-warnings",option+1) == 0) /* FUTURE: to be replaced by a 'fatal-level' type setting */ break; if (LocaleCompare("render",option+1) == 0) { /* _draw_info only setting */ _draw_info->render= ArgBooleanNot; break; } if (LocaleCompare("respect-parenthesis",option+1) == 0) { /* link image and setting stacks - option is itself saved on stack! */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* FUTURE: should be converted to jpeg:sampling_factor */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) CloneString(&_image_info->sampling_factor,ArgOption(NULL)); break; } if (LocaleCompare("scene",option+1) == 0) { /* SyncImageSettings() used to set this as a per-image attribute. What ??? Why ???? */ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _image_info->scene=StringToUnsignedLong(ArgOption("0")); break; } if (LocaleCompare("seed",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); SetRandomSecretKey( IfSetOption ? (unsigned long) StringToUnsignedLong(arg1) : (unsigned long) time((time_t *) NULL)); break; } if (LocaleCompare("size",option+1) == 0) { /* FUTURE: string in _image_info -- convert to Option ??? Look at the special handling for "size" in SetImageOption() */ (void) CloneString(&_image_info->size,ArgOption(NULL)); break; } if (LocaleCompare("stretch",option+1) == 0) { arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickStretchOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedStretchType", option,arg1); _draw_info->stretch=(StretchType) parse; break; } if (LocaleCompare("stroke",option+1) == 0) { /* set stroke color OR stroke-pattern UPDATE: ensure stroke color is not destroyed is a pattern is given. 
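(If the argument does not parse as a color it is loaded as a stroke pattern image via GetImageCache(); a hypothetical example is "-stroke pattern:checkerboard" versus a plain color such as "-stroke blue".)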
Just in case the color is also used for other purposes. */ MagickBooleanType status; ExceptionInfo *sans; PixelInfo color; arg1 = ArgOption("none"); /* +fill turns it off! */ (void) SetImageOption(_image_info,option+1,arg1); if (_draw_info->stroke_pattern != (Image *) NULL) _draw_info->stroke_pattern=DestroyImage(_draw_info->stroke_pattern); /* is it a color or a image? -- ignore exceptions */ sans=AcquireExceptionInfo(); status=QueryColorCompliance(arg1,AllCompliance,&color,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) _draw_info->stroke_pattern=GetImageCache(_image_info,arg1,_exception); else _draw_info->stroke=color; break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _draw_info->stroke_width=StringToDouble(ArgOption("1.0"), (char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { arg1=ArgOption("undefined"); parse = ParseCommandOption(MagickStyleOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedStyleType", option,arg1); _draw_info->style=(StyleType) parse; break; } #if 0 if (LocaleCompare("subimage-search",option+1) == 0) { /* FUTURE: this is only used by CompareImages() which is used only by the "compare" CLI program at this time. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } #endif if (LocaleCompare("synchronize",option+1) == 0) { /* FUTURE: syncronize to storage - but what does that mean? */ _image_info->synchronize = ArgBoolean; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 't': { if (LocaleCompare("taint",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); break; } if (LocaleCompare("texture",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ /* FUTURE: move _image_info string to option splay-tree Other than "montage" what uses "texture" ???? */ (void) CloneString(&_image_info->texture,ArgOption(NULL)); break; } if (LocaleCompare("tile",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ _draw_info->fill_pattern=IfSetOption ?GetImageCache(_image_info,arg1,_exception) :DestroyImage(_draw_info->fill_pattern); break; } if (LocaleCompare("tile-offset",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. ??? */ arg1=ArgOption("0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("transparent-color",option+1) == 0) { /* FUTURE: both _image_info attribute & ImageOption in use! _image_info only used for generating new images. SyncImageSettings() used to set per-image attribute. Note that +transparent-color, means fall-back to image attribute so ImageOption is deleted, not set to a default. 
*/ if (IfSetOption && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption("none"),AllCompliance, &_image_info->transparent_color,_exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); _quantize_info->tree_depth=StringToUnsignedLong(ArgOption("0")); break; } if (LocaleCompare("type",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. */ parse=ParseCommandOption(MagickTypeOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedImageType", option,arg1); _image_info->type=(ImageType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); (void) QueryColorCompliance(ArgOption("none"),AllCompliance, &_draw_info->undercolor,_exception); break; } if (LocaleCompare("units",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. Should this effect _draw_info X and Y resolution? FUTURE: this probably should be part of the density setting */ parse=ParseCommandOption(MagickResolutionOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedUnitsType", option,arg1); _image_info->units=(ResolutionType) parse; (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { /* FUTURE: Remember all options become image artifacts _image_info->verbose is only used by coders. */ (void) SetImageOption(_image_info,option+1,ArgBooleanString); _image_info->verbose= ArgBoolean; _image_info->ping=MagickFalse; /* verbose can't be a ping */ break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { /* SyncImageSettings() used to set per-image attribute. This is VERY deep in the image caching structure. */ parse=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, ArgOption("undefined")); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedVirtualPixelMethod", option,arg1); (void) SetImageOption(_image_info,option+1,ArgOption(NULL)); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'w': { if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,arg1); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(arg1); _draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-point",option+1) == 0) { /* Used as a image chromaticity setting SyncImageSettings() used to set per-image attribute. 
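For example "-white-point 0.3127,0.329" records the CIE D65 white point chromaticity.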
*/ arg1=ArgOption("0.0"); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SetImageOption(_image_info,option+1,arg1); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if ((arg1 && arg1n) && (arg1 != arg1n )) arg1=DestroyString((char *) arg1); if ((arg2 && arg2n) && (arg2 != arg2n )) arg2=DestroyString((char *) arg2); #undef _image_info #undef _exception #undef _draw_info #undef _quantize_info #undef IfSetOption #undef ArgBoolean #undef ArgBooleanNot #undef ArgBooleanString #undef ArgOption return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I S i m p l e O p e r a t o r I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLISimpleOperatorImages() applys one simple image operation given to all % the images in the CLI wand, using any per-image or global settings that was % previously saved in the CLI wand. % % It is assumed that any such settings are up-to-date. % % The format of the WandSimpleOperatorImages method is: % % MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,const char *option, % const char *arg1, const char *arg2,ExceptionInfo *exception) % % A description of each parameter follows: % % o cli_wand: structure holding settings and images to be operated on % % o option: The option string for the operation % % o arg1, arg2: optional argument strings to the operation % */ /* CLISimpleOperatorImage() is an Internal subrountine to apply one simple image operation to the current image pointed to by the CLI wand. The image in the list may be modified in three different ways... * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) * one image replace by a list of images (-separate and -crop only!) In each case the result replaces the single original image in the list, as well as the pointer to the modified image (last image added if replaced by a list of images) is returned. As the image pointed to may be replaced, the first image in the list may also change. GetFirstImageInList() should be used by caller if they wish return the Image pointer to the first image in list. */ static MagickBooleanType CLISimpleOperatorImage(MagickCLI *cli_wand, const char *option, const char *arg1n, const char *arg2n, ExceptionInfo *exception) { Image * new_image; GeometryInfo geometry_info; RectangleInfo geometry; MagickStatusType flags; ssize_t parse; const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _image (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') #define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse #define IsPlusOp IfNormalOp ? 
MagickFalse : MagickTrue assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(_image != (Image *) NULL); /* an image must be present */ if (cli_wand->wand.debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",cli_wand->wand.name); arg1 = arg1n, arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type #if 0 (void) FormatLocaleFile(stderr, "CLISimpleOperatorImage: \"%s\" \"%s\" \"%s\"\n",option,arg1,arg2); #endif new_image = (Image *) NULL; /* the replacement image, if not null at end */ SetGeometryInfo(&geometry_info); switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=AdaptiveBlurImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=AdaptiveResizeImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=AdaptiveSharpenImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("alpha",option+1) == 0) { parse=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedAlphaChannelOption", option,arg1); (void) SetImageAlphaChannel(_image,(AlphaChannelOption) parse, _exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char geometry[MagickPathExtent]; SetGeometryInfo(&geometry_info); flags=ParseGeometry(arg1,&geometry_info); if (flags == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; (void) CloneString(&_draw_info->text,arg2); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&_draw_info->geometry,geometry); _draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); _draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); _draw_info->affine.ry=(-sin(DegreesToRadians( 
fmod(geometry_info.sigma,360.0)))); _draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(_image,_draw_info,_exception); GetAffineMatrix(&_draw_info->affine); break; } if (LocaleCompare("auto-gamma",option+1) == 0) { (void) AutoGammaImage(_image,_exception); break; } if (LocaleCompare("auto-level",option+1) == 0) { (void) AutoLevelImage(_image,_exception); break; } if (LocaleCompare("auto-orient",option+1) == 0) { new_image=AutoOrientImage(_image,_image->orientation,_exception); break; } if (LocaleCompare("auto-threshold",option+1) == 0) { AutoThresholdMethod method; method=(AutoThresholdMethod) ParseCommandOption( MagickAutoThresholdOptions,MagickFalse,arg1); (void) AutoThresholdImage(_image,method,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) BlackThresholdImage(_image,arg1,_exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { geometry_info.rho=1.5; if (IfNormalOp) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); } new_image=BlueShiftImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=BlurImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("border",option+1) == 0) { CompositeOperator compose; const char* value; flags=ParsePageGeometry(_image,arg1,&geometry,_exception); if ((flags & (WidthValue | HeightValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); compose=OverCompositeOp; value=GetImageOption(_image_info,"compose"); if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); new_image=BorderImage(_image,&geometry,compose,_exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImage(_image,brightness,contrast, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("canny",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=10; if ((flags & PsiValue) == 0) geometry_info.psi=30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } new_image=CannyEdgeImage(_image,geometry_info.rho,geometry_info.sigma, geometry_info.xi,geometry_info.psi,_exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Note: arguments do not have percent escapes expanded */ /* Color correct with a color decision list. 
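The argument names an ASC CDL file (typically XML, e.g. a ".ccc" file) that is read with FileToString() and applied by ColorDecisionListImage().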
*/ color_correction_collection=FileToString(arg1,~0UL,_exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(_image,color_correction_collection, _exception); break; } if (LocaleCompare("channel",option+1) == 0) { if (IfPlusOp) { (void) SetPixelChannelMask(_image,DefaultChannels); break; } parse=ParseChannelOption(arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedChannelType",option, arg1); (void) SetPixelChannelMask(_image,(ChannelType) parse); break; } if (LocaleCompare("charcoal",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; new_image=CharcoalImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("chop",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseGravityGeometry(_image,arg1,&geometry,_exception); new_image=ChopImage(_image,&geometry,_exception); break; } if (LocaleCompare("clahe",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGeometry(arg1,&geometry_info); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); (void) CLAHEImage(_image,geometry.width,geometry.height, (size_t) geometry.x,geometry_info.psi,_exception); break; } if (LocaleCompare("clamp",option+1) == 0) { (void) ClampImage(_image,_exception); break; } if (LocaleCompare("clip",option+1) == 0) { if (IfNormalOp) (void) ClipImage(_image,_exception); else /* "+mask" remove the write mask */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { Image *clip_mask; if (IfPlusOp) { /* use "+clip-mask" Remove the write mask for -clip-path */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL,_exception); break; } clip_mask=GetImageCache(_image_info,arg1,_exception); if (clip_mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,clip_mask,_exception); clip_mask=DestroyImage(clip_mask); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) ClipImagePath(_image,arg1,IsNormalOp,_exception); /* Note: Use "+clip-mask" remove the write mask added */ break; } if (LocaleCompare("colorize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ColorizeImage(_image,arg1,&_draw_info->fill,_exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; kernel=AcquireKernelInfo(arg1,exception); if (kernel == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ColorMatrixImage(_image,kernel,_exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. 
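For example "-colors 16" quantizes down to at most 16 colors; if the image already uses no more than that, only the colormap is compressed.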
FUTURE: also provide 'plus version with image 'color counts' */ _quantize_info->number_colors=StringToUnsignedLong(arg1); if (_quantize_info->number_colors == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((_image->storage_class == DirectClass) || _image->colors > _quantize_info->number_colors) (void) QuantizeImage(_quantize_info,_image,_exception); else (void) CompressImageColormap(_image,_exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { /* WARNING: this is both a image_info setting (already done) and a operator to change image colorspace. FUTURE: default colorspace should be sRGB! Unless some type of 'linear colorspace' mode is set. Note that +colorspace sets "undefined" or no effect on new images, but forces images already in memory back to RGB! That seems to be a little strange! */ (void) TransformImageColorspace(_image, IfNormalOp ? _image_info->colorspace : sRGBColorspace, _exception); break; } if (LocaleCompare("connected-components",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ConnectedComponentsImage(_image,(size_t) StringToInteger(arg1),(CCObjectInfo **) NULL,_exception); break; } if (LocaleCompare("contrast",option+1) == 0) { CLIWandWarnReplaced(IfNormalOp?"-level":"+level"); (void) ContrastImage(_image,IsNormalOp,_exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) _image->columns*_image->rows/100.0; white_point*=(double) _image->columns*_image->rows/100.0; } white_point=(double) _image->columns*_image->rows-white_point; (void) ContrastStretchImage(_image,black_point,white_point, _exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; kernel_info=AcquireKernelInfo(arg1,exception); if (kernel_info == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); gamma=0.0; for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++) kernel_info->values[j]*=gamma; new_image=MorphologyImage(_image,CorrelateMorphology,1,kernel_info, _exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* WARNING: This can generate multiple images! 
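A tile geometry without an offset, such as a hypothetical "-crop 100x100", replaces the image with a whole list of tiles; a geometry with an offset, such as "-crop 100x100+10+10", yields a single cropped image.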
*/ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=CropImageToTiles(_image,arg1,_exception); break; } if (LocaleCompare("cycle",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) CycleColormapImage(_image,(ssize_t) StringToLong(arg1), _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ StringInfo *passkey; passkey=FileToStringInfo(arg1,~0UL,_exception); if (passkey == (StringInfo *) NULL) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) PasskeyDecipherImage(_image,passkey,_exception); passkey=DestroyStringInfo(passkey); break; } if (LocaleCompare("depth",option+1) == 0) { /* The _image_info->depth setting has already been set We just need to apply it to all images in current sequence WARNING: Depth from 8 to 16 causes 'quantum rounding to images! That is it really is an operation, not a setting! Arrgghhh FUTURE: this should not be an operator!!! */ (void) SetImageDepth(_image,_image_info->depth,_exception); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); } else threshold=40.0*QuantumRange/100.0; new_image=DeskewImage(_image,threshold,_exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { new_image=DespeckleImage(_image,_exception); break; } if (LocaleCompare("distort",option+1) == 0) { double *args; ssize_t count; parse = ParseCommandOption(MagickDistortOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedDistortMethod", option,arg1); if ((DistortMethod) parse == ResizeDistortion) { double resize_args[2]; /* Special Case - Argument is actually a resize geometry! ** Convert that to an appropriate distortion argument array. 
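** For example a hypothetical "-distort Resize 50%" behaves like a resize but is carried out by the distortion machinery.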
** FUTURE: make a separate special resize operator Roll into a resize special operator */ if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidGeometry", option,arg2); (void) ParseRegionGeometry(_image,arg2,&geometry,_exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; new_image=DistortImage(_image,(DistortMethod) parse, (size_t)2,resize_args,MagickTrue,_exception); break; } /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg2,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2); new_image=DistortImage(_image,(DistortMethod) parse,(size_t) count,args,IsPlusOp,_exception); args=(double *) RelinquishMagickMemory(args); break; } if (LocaleCompare("draw",option+1) == 0) { (void) CloneString(&_draw_info->primitive,arg1); (void) DrawImage(_image,_draw_info,_exception); (void) CloneString(&_draw_info->primitive,(char *) NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("edge",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=EdgeImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("emboss",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=EmbossImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("encipher",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ StringInfo *passkey; passkey=FileToStringInfo(arg1,~0UL,_exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(_image,passkey,_exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("enhance",option+1) == 0) { new_image=EnhanceImage(_image,_exception); break; } if (LocaleCompare("equalize",option+1) == 0) { (void) EqualizeImage(_image,_exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; parse = ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); constant=StringToDoubleInterval(arg2,(double) QuantumRange+1.0); (void) EvaluateImage(_image,(MagickEvaluateOperator)parse,constant, _exception); break; } if (LocaleCompare("extent",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGravityGeometry(_image,arg1,&geometry,_exception); if (geometry.width == 0) geometry.width=_image->columns; if (geometry.height == 0) geometry.height=_image->rows; new_image=ExtentImage(_image,&geometry,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("flip",option+1) == 0) { new_image=FlipImage(_image,_exception); break; } if (LocaleCompare("flop",option+1) == 0) { new_image=FlopImage(_image,_exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { PixelInfo target; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) 
ParsePageGeometry(_image,arg1,&geometry,_exception); (void) QueryColorCompliance(arg2,AllCompliance,&target,_exception); (void) FloodfillPaintImage(_image,_draw_info,&target,geometry.x, geometry.y,IsPlusOp,_exception); break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; CompositeOperator compose; const char* value; value=GetImageOption(_image_info,"compose"); compose=OverCompositeOp; /* use Over not _image->compose */ if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=_image->columns+2*frame_info.width; frame_info.height=_image->rows+2*frame_info.height; new_image=FrameImage(_image,&frame_info,compose,_exception); break; } if (LocaleCompare("function",option+1) == 0) { double *args; ssize_t count; parse=ParseCommandOption(MagickFunctionOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction", option,arg1); /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg2,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2); (void) FunctionImage(_image,(MagickFunction)parse,(size_t) count,args, _exception); args=(double *) RelinquishMagickMemory(args); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { double constant; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); constant=StringToDouble(arg1,(char **) NULL); #if 0 /* Using Gamma, via a cache */ if (IfPlusOp) constant=PerceptibleReciprocal(constant); (void) GammaImage(_image,constant,_exception); #else /* Using Evaluate POW, direct update of values - more accurite */ if (IfNormalOp) constant=PerceptibleReciprocal(constant); (void) EvaluateImage(_image,PowEvaluateOperator,constant,_exception); _image->gamma*=StringToDouble(arg1,(char **) NULL); #endif /* Set gamma setting -- Old meaning of "+gamma" * _image->gamma=StringToDouble(arg1,(char **) NULL); */ break; } if (LocaleCompare("gaussian-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=GaussianBlurImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("gaussian",option+1) == 0) { CLIWandWarnReplaced("-gaussian-blur"); (void) CLISimpleOperatorImage(cli_wand,"-gaussian-blur",arg1,NULL,exception); } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset for composition. (A Setting) Resize last _image. (ListOperator) -- DEPRECIATE FUTURE: Why if no 'offset' does this resize ALL images? Also why is the setting recorded in the IMAGE non-sense! */ if (IfPlusOp) { /* remove the previous composition geometry offset! 
*/ if (_image->geometry != (char *) NULL) _image->geometry=DestroyString(_image->geometry); break; } if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&_image->geometry,arg1); else new_image=ResizeImage(_image,geometry.width,geometry.height, _image->filter,_exception); break; } if (LocaleCompare("grayscale",option+1) == 0) { parse=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityMethod", option,arg1); (void) GrayscaleImage(_image,(PixelIntensityMethod) parse,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("hough-lines",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; new_image=HoughLineImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("identify",option+1) == 0) { const char *format, *text; format=GetImageOption(_image_info,"format"); if (format == (char *) NULL) { (void) IdentifyImage(_image,stdout,_image_info->verbose, _exception); break; } text=InterpretImageProperties(_image_info,_image,format,_exception); if (text == (char *) NULL) CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); (void) fputs(text,stdout); text=DestroyString((char *)text); break; } if (LocaleCompare("implode",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ImplodeImage(_image,geometry_info.rho,_image->interpolate, _exception); break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* FUTURE: New to IMv7 Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=InterpolativeResizeImage(_image,geometry.width, geometry.height,_image->interpolate,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'k': { if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
*/ flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; new_image=KuwaharaImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("lat",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=AdaptiveThresholdImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(double) geometry_info.xi, _exception); break; } if (LocaleCompare("level",option+1) == 0) { double black_point, gamma, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(double) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(double) (QuantumRange/100.0); white_point*=(double) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(double) QuantumRange-black_point; if (IfPlusOp || ((flags & AspectValue) != 0)) (void) LevelizeImage(_image,black_point,white_point,gamma,_exception); else (void) LevelImage(_image,black_point,white_point,gamma,_exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MagickPathExtent]; const char *p; PixelInfo black_point, white_point; p=(const char *) arg1; GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &black_point,_exception); else (void) QueryColorCompliance("#000000",AllCompliance, &black_point,_exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. 
*/ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryColorCompliance(token,AllCompliance, &white_point,_exception); else (void) QueryColorCompliance("#ffffff",AllCompliance, &white_point,_exception); } (void) LevelImageColors(_image,&black_point,&white_point, IsPlusOp,_exception); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); black_point=geometry_info.rho; white_point=(double) _image->columns*_image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) _image->columns*_image->rows/100.0; white_point*=(double) _image->columns*_image->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) _image->columns*_image->rows- black_point; (void) LinearStretchImage(_image,black_point,white_point,_exception); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseRegionGeometry(_image,arg1,&geometry,_exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; new_image=LiquidRescaleImage(_image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,_exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { MagickStatusType flags; flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; new_image=LocalContrastImage(_image,geometry_info.rho, geometry_info.sigma,exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { new_image=MagnifyImage(_image,_exception); break; } if (LocaleCompare("map",option+1) == 0) { CLIWandWarnReplaced("-remap"); (void) CLISimpleOperatorImage(cli_wand,"-remap",NULL,NULL,exception); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. */ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,mask,_exception); mask=DestroyImage(mask); break; } if (LocaleCompare("matte",option+1) == 0) { CLIWandWarnReplaced(IfNormalOp?"-alpha Set":"-alpha Off"); (void) SetImageAlphaChannel(_image,IfNormalOp ? 
SetAlphaChannel : DeactivateAlphaChannel, _exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=MeanShiftImage(_image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("median",option+1) == 0) { CLIWandWarnReplaced("-statistic Median"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","Median",arg1,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* FUTURE: note this is also a special "montage" option */ CLIWandWarnReplaced("-statistic Mode"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","Mode",arg1,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ModulateImage(_image,arg1,_exception); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageProgressMonitor(_image, IfNormalOp ? MonitorProgress : (MagickProgressMonitor) NULL,(void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SetImageType(_image,BilevelType,_exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MagickPathExtent]; const char *p; KernelInfo *kernel; ssize_t iterations; p=arg1; GetNextToken(p,&p,MagickPathExtent,token); parse=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",option, arg1); iterations=1L; GetNextToken(p,&p,MagickPathExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MagickPathExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(arg2,exception); if (kernel == (KernelInfo *) NULL) CLIWandExceptArgBreak(OptionError,"UnabletoParseKernel",option,arg2); new_image=MorphologyImage(_image,(MorphologyMethod)parse,iterations, kernel,_exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=MotionBlurImage(_image,geometry_info.rho,geometry_info.sigma, geometry_info.xi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) NegateImage(_image, IsPlusOp, _exception); break; } if (LocaleCompare("noise",option+1) == 0) { double attenuate; const char* value; if (IfNormalOp) { CLIWandWarnReplaced("-statistic NonPeak"); (void) CLISimpleOperatorImage(cli_wand,"-statistic","NonPeak",arg1,exception); break; } parse=ParseCommandOption(MagickNoiseOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedNoiseType", option,arg1); attenuate=1.0; value=GetImageOption(_image_info,"attenuate"); if (value != (const char *) NULL) attenuate=StringToDouble(value,(char **) NULL); new_image=AddNoiseImage(_image,(NoiseType)parse,attenuate, _exception); break; } if (LocaleCompare("normalize",option+1) == 0) { (void) NormalizeImage(_image,_exception); 
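          /* Note (added commentary): NormalizeImage() needs no argument here; it
             stretches the channel intensities so they span the full quantum range,
             i.e. it is essentially an automatic contrast-stretch. */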
break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { PixelInfo target; (void) QueryColorCompliance(arg1,AllCompliance,&target,_exception); (void) OpaquePaintImage(_image,&target,&_draw_info->fill,IsPlusOp, _exception); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) OrderedDitherImage(_image,arg1,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("paint",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=OilPaintImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { (void) PerceptibleImage(_image,StringToDouble(arg1,(char **) NULL), _exception); break; } if (LocaleCompare("polaroid",option+1) == 0) { const char *caption; double angle; if (IfPlusOp) { RandomInfo *random_info; random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); } else { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); angle=geometry_info.rho; } caption=GetImageProperty(_image,"caption",_exception); new_image=PolaroidImage(_image,_draw_info,caption,angle, _image->interpolate,_exception); break; } if (LocaleCompare("posterize",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) PosterizeImage(_image,(size_t) geometry_info.rho, _quantize_info->dither_method,_exception); break; } if (LocaleCompare("preview",option+1) == 0) { /* FUTURE: should be a 'Genesis' option? Option however is also in WandSettingOptionInfo() Why??? */ parse=ParseCommandOption(MagickPreviewOptions, MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedPreviewType", option,arg1); new_image=PreviewImage(_image,(PreviewType)parse,_exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; /* Note: arguments do not have percent escapes expanded */ if (IfPlusOp) { /* Remove a profile from the _image. */ (void) ProfileImage(_image,arg1,(const unsigned char *) NULL,0,_exception); break; } /* Associate a profile with the _image. 
*/ profile_info=CloneImageInfo(_image_info); profile=GetImageProfile(_image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,arg1,_exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *profile; profile_info=CloneImageInfo(_image_info); (void) CopyMagickString(profile_info->filename,arg1, MagickPathExtent); profile=FileToStringInfo(profile_info->filename,~0UL,_exception); if (profile != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,_exception); (void) ProfileImage(_image,profile_info->magick, GetStringInfoDatum(profile),(size_t) GetStringInfoLength(profile),_exception); profile=DestroyStringInfo(profile); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(_image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),_exception); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("raise",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); (void) RaiseImage(_image,&geometry,IsNormalOp,_exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { double min_threshold, max_threshold; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); min_threshold=0.0; max_threshold=(double) QuantumRange; flags=ParseGeometry(arg1,&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(arg1,'%') != (char *) NULL) { max_threshold*=(double) (0.01*QuantumRange); min_threshold*=(double) (0.01*QuantumRange); } (void) RandomThresholdImage(_image,min_threshold,max_threshold, _exception); break; } if (LocaleCompare("range-threshold",option+1) == 0) { /* Range threshold image. */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGeometry(arg1,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=geometry_info.sigma; if ((flags & PsiValue) == 0) geometry_info.psi=geometry_info.xi; if (strchr(arg1,'%') != (char *) NULL) { geometry_info.rho*=(double) (0.01*QuantumRange); geometry_info.sigma*=(double) (0.01*QuantumRange); geometry_info.xi*=(double) (0.01*QuantumRange); geometry_info.psi*=(double) (0.01*QuantumRange); } (void) RangeThresholdImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("read-mask",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,ReadPixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. 
*/ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,ReadPixelMask,mask,_exception); mask=DestroyImage(mask); break; } if (LocaleCompare("recolor",option+1) == 0) { CLIWandWarnReplaced("-color-matrix"); (void) CLISimpleOperatorImage(cli_wand,"-color-matrix",arg1,NULL, exception); } if (LocaleCompare("region",option+1) == 0) { if (*option == '+') { (void) SetImageRegionMask(_image,WritePixelMask, (const RectangleInfo *) NULL,_exception); break; } if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseGravityGeometry(_image,arg1,&geometry,_exception); (void) SetImageRegionMask(_image,WritePixelMask,&geometry,_exception); break; } if (LocaleCompare("remap",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *remap_image; remap_image=GetImageCache(_image_info,arg1,_exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(_quantize_info,_image,remap_image,_exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option, arg1); (void) ResetImagePage(_image,arg1); } else (void) ParseAbsoluteGeometry("0x0+0+0",&_image->page); break; } if (LocaleCompare("resample",option+1) == 0) { /* FUTURE: Roll into a resize special operation */ flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=ResampleImage(_image,geometry_info.rho, geometry_info.sigma,_image->filter,_exception); break; } if (LocaleCompare("resize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=ResizeImage(_image,geometry.width,geometry.height, _image->filter,_exception); break; } if (LocaleCompare("roll",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) _image->columns/100.0; geometry.y*=(double) _image->rows/100.0; } new_image=RollImage(_image,geometry.x,geometry.y,_exception); break; } if (LocaleCompare("rotate",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & GreaterValue) != 0 && (_image->columns <= _image->rows)) break; if ((flags & LessValue) != 0 && (_image->columns >= _image->rows)) break; new_image=RotateImage(_image,geometry_info.rho,_exception); break; } if (LocaleCompare("rotational-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=RotationalBlurImage(_image,geometry_info.rho,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); 
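          /* Note (added commentary): ParseRegionGeometry() resolves the geometry
             string (e.g. "200x100" or "50%") against the current image size,
             producing the absolute width/height used by the pixel-sampling resize
             that follows. */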
new_image=SampleImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* FUTURE: Roll into a resize special operator */ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); new_image=ScaleImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("segment",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(_image,_image->colorspace, _image_info->verbose,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; new_image=SelectiveBlurImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* WARNING: This can generate multiple images! */ /* FUTURE - this may be replaced by a "-channel" method */ new_image=SeparateImages(_image,_exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=SepiaToneImage(_image,StringToDoubleInterval(arg1, (double) QuantumRange+1.0),_exception); break; } if (LocaleCompare("shade",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if (((flags & RhoValue) == 0) || ((flags & SigmaValue) == 0)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=ShadeImage(_image,IsNormalOp,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("shadow",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; new_image=ShadowImage(_image,geometry_info.rho,geometry_info.sigma, (ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),_exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.0; new_image=SharpenImage(_image,geometry_info.rho,geometry_info.sigma, _exception); break; } if (LocaleCompare("shave",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParsePageGeometry(_image,arg1,&geometry,_exception); new_image=ShaveImage(_image,&geometry,_exception); break; } if (LocaleCompare("shear",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=ShearImage(_image,geometry_info.rho,geometry_info.sigma, 
_exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImage(_image,IsNormalOp,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("sketch",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=SketchImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,_exception); break; } if (LocaleCompare("solarize",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) SolarizeImage(_image,StringToDoubleInterval(arg1,(double) QuantumRange+1.0),_exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { parse= ParseCommandOption(MagickSparseColorOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedSparseColorMethod", option,arg1); new_image=SparseColorOption(_image,(SparseColorMethod)parse,arg2, _exception); break; } if (LocaleCompare("splice",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); flags=ParseGravityGeometry(_image,arg1,&geometry,_exception); new_image=SpliceImage(_image,&geometry,_exception); break; } if (LocaleCompare("spread",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); new_image=SpreadImage(_image,_image->interpolate,geometry_info.rho, _exception); break; } if (LocaleCompare("statistic",option+1) == 0) { parse=ParseCommandOption(MagickStatisticOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedStatisticType", option,arg1); flags=ParseGeometry(arg2,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; new_image=StatisticImage(_image,(StatisticType)parse, (size_t) geometry_info.rho,(size_t) geometry_info.sigma, _exception); break; } if (LocaleCompare("strip",option+1) == 0) { (void) StripImage(_image,_exception); break; } if (LocaleCompare("swirl",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=SwirlImage(_image,geometry_info.rho, _image->interpolate,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; threshold=(double) QuantumRange/2; if (IfNormalOp) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0); } (void) BilevelImage(_image,threshold,_exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParseRegionGeometry(_image,arg1,&geometry,_exception); 
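          /* Note (added commentary): ThumbnailImage() is a fast resize intended for
             small preview images; unlike ResizeImage() it also strips any attached
             profiles to keep the output small. */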
new_image=ThumbnailImage(_image,geometry.width,geometry.height, _exception); break; } if (LocaleCompare("tint",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); new_image=TintImage(_image,arg1,&_draw_info->fill,_exception); break; } if (LocaleCompare("transform",option+1) == 0) { CLIWandWarnReplaced("+distort AffineProjection"); new_image=AffineTransformImage(_image,&_draw_info->affine,_exception); break; } if (LocaleCompare("transparent",option+1) == 0) { PixelInfo target; (void) QueryColorCompliance(arg1,AllCompliance,&target,_exception); (void) TransparentPaintImage(_image,&target,(Quantum) TransparentAlpha,IsPlusOp,_exception); break; } if (LocaleCompare("transpose",option+1) == 0) { new_image=TransposeImage(_image,_exception); break; } if (LocaleCompare("transverse",option+1) == 0) { new_image=TransverseImage(_image,_exception); break; } if (LocaleCompare("trim",option+1) == 0) { new_image=TrimImage(_image,_exception); break; } if (LocaleCompare("type",option+1) == 0) { /* Note that "type" setting should have already been defined */ (void) SetImageType(_image,_image_info->type,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'u': { if (LocaleCompare("unique",option+1) == 0) { /* FUTURE: move to SyncImageSettings() and AcqireImage()??? Option is not documented, bt appears to be for "identify". We may need a identify specific verbose! */ if (IsPlusOp) { (void) DeleteImageArtifact(_image,"identify:unique-colors"); break; } (void) SetImageArtifact(_image,"identify:unique-colors","true"); (void) SetImageArtifact(_image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { new_image=UniqueImageColors(_image,_exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; new_image=UnsharpMaskImage(_image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { /* FUTURE: move to SyncImageSettings() and AcquireImage()??? three places! ImageArtifact ImageOption _image_info->verbose Some how new images also get this artifact! */ (void) SetImageArtifact(_image,option+1, IfNormalOp ? 
"true" : "false" ); break; } if (LocaleCompare("vignette",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*_image->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*_image->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) _image->columns/100.0; geometry_info.psi*=(double) _image->rows/100.0; } new_image=VignetteImage(_image,geometry_info.rho,geometry_info.sigma, (ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'w': { if (LocaleCompare("wave",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & (RhoValue|SigmaValue)) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; new_image=WaveImage(_image,geometry_info.rho,geometry_info.sigma, _image->interpolate,_exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; new_image=WaveletDenoiseImage(_image,geometry_info.rho, geometry_info.sigma,_exception); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) WhiteThresholdImage(_image,arg1,_exception); break; } if (LocaleCompare("write-mask",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ Image *mask; if (IfPlusOp) { /* Remove a mask. */ (void) SetImageMask(_image,WritePixelMask,(Image *) NULL, _exception); break; } /* Set the image mask. 
*/ mask=GetImageCache(_image_info,arg1,_exception); if (mask == (Image *) NULL) break; (void) SetImageMask(_image,WritePixelMask,mask,_exception); mask=DestroyImage(mask); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); /* Replace current image with any image that was generated and set image pointer to last image (so image->next is correct) */ if (new_image != (Image *) NULL) ReplaceImageInListReturnLast(&_image,new_image); return(MagickTrue); #undef _image_info #undef _draw_info #undef _quantize_info #undef _image #undef _exception #undef IfNormalOp #undef IfPlusOp #undef IsNormalOp #undef IsPlusOp } WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception) { #if !USE_WAND_METHODS size_t n, i; #endif assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Simple Operator: %s \"%s\" \"%s\"", option,arg1,arg2); #if !USE_WAND_METHODS /* FUTURE: add appropriate tracing */ i=0; n=GetImageListLength(cli_wand->wand.images); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); while (1) { i++; CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); if ( cli_wand->wand.images->next == (Image *) NULL ) break; cli_wand->wand.images=cli_wand->wand.images->next; } assert( i == n ); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); #else MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); MagickResetIterator(&cli_wand->wand); #endif return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I L i s t O p e r a t o r I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLIListOperatorImages() applies a single operation to the entire image % list as a whole. The result is often a complete replacement % of the image list with a completely new list, or with just a single image % result.
% % The format of the MogrifyImage method is: % % MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand, % const char *option,const char *arg1,const char *arg2) % % A description of each parameter follows: % % o cli_wand: structure holding settings to be applied % % o option: The option string for the operation % % o arg1, arg2: optional argument strings to the operation % arg2 is currently not used % */ WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1n,const char *arg2n) { const char /* percent escaped versions of the args */ *arg1, *arg2; Image *new_images; MagickStatusType status; ssize_t parse; #define _image_info (cli_wand->wand.image_info) #define _images (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _draw_info (cli_wand->draw_info) #define _quantize_info (cli_wand->quantize_info) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') #define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(_images != (Image *) NULL); /* _images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- List Operator: %s \"%s\" \"%s\"", option, arg1n == (const char *) NULL ? "null" : arg1n, arg2n == (const char *) NULL ? "null" : arg2n); arg1 = arg1n; arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type status=MagickTrue; new_images=NewImageList(); switch (*(option+1)) { case 'a': { if (LocaleCompare("append",option+1) == 0) { new_images=AppendImages(_images,IsNormalOp,_exception); break; } if (LocaleCompare("average",option+1) == 0) { CLIWandWarnReplaced("-evaluate-sequence Mean"); (void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean", NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'c': { if (LocaleCompare("channel-fx",option+1) == 0) { new_images=ChannelFxImage(_images,arg1,_exception); break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image; /* FUTURE - make this a compose option, and thus can be used with layers compose or even compose last image over all other _images. 
*/ new_images=RemoveFirstImageFromList(&_images); clut_image=RemoveFirstImageFromList(&_images); /* FUTURE - produce Exception, rather than silent fail */ if (clut_image == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); new_images=DestroyImage(new_images); status=MagickFalse; break; } (void) ClutImage(new_images,clut_image,new_images->interpolate, _exception); clut_image=DestroyImage(clut_image); break; } if (LocaleCompare("coalesce",option+1) == 0) { new_images=CoalesceImages(_images,_exception); break; } if (LocaleCompare("combine",option+1) == 0) { parse=(ssize_t) _images->colorspace; if (_images->number_channels < GetImageListLength(_images)) parse=sRGBColorspace; if ( IfPlusOp ) parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option, arg1); new_images=CombineImages(_images,(ColorspaceType) parse,_exception); break; } if (LocaleCompare("compare",option+1) == 0) { double distortion; Image *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. */ image=RemoveFirstImageFromList(&_images); reconstruct_image=RemoveFirstImageFromList(&_images); /* FUTURE - produce Exception, rather than silent fail */ if (reconstruct_image == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); image=DestroyImage(image); status=MagickFalse; break; } metric=UndefinedErrorMetric; option=GetImageOption(_image_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); new_images=CompareImages(image,reconstruct_image,metric,&distortion, _exception); (void) distortion; reconstruct_image=DestroyImage(reconstruct_image); image=DestroyImage(image); break; } if (LocaleCompare("complex",option+1) == 0) { parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); new_images=ComplexImages(_images,(ComplexOperator) parse,_exception); break; } if (LocaleCompare("composite",option+1) == 0) { CompositeOperator compose; const char* value; MagickBooleanType clip_to_self; Image *mask_image, *source_image; RectangleInfo geometry; /* Compose value from "-compose" option only */ value=GetImageOption(_image_info,"compose"); if (value == (const char *) NULL) compose=OverCompositeOp; /* use Over not source_image->compose */ else compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,value); /* Get "clip-to-self" expert setting (false is normal) */ clip_to_self=GetCompositeClipToSelf(compose); value=GetImageOption(_image_info,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsStringTrue(value); value=GetImageOption(_image_info,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsStringFalse(value); /* deprecated */ new_images=RemoveFirstImageFromList(&_images); source_image=RemoveFirstImageFromList(&_images); if (source_image == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); new_images=DestroyImage(new_images); status=MagickFalse; break; } /* FUTURE - this should not be here! 
- should be part of -geometry */ if (source_image->geometry != (char *) NULL) { RectangleInfo resize_geometry; (void) ParseRegionGeometry(source_image,source_image->geometry, &resize_geometry,_exception); if ((source_image->columns != resize_geometry.width) || (source_image->rows != resize_geometry.height)) { Image *resize_image; resize_image=ResizeImage(source_image,resize_geometry.width, resize_geometry.height,source_image->filter,_exception); if (resize_image != (Image *) NULL) { source_image=DestroyImage(source_image); source_image=resize_image; } } } SetGeometry(source_image,&geometry); (void) ParseAbsoluteGeometry(source_image->geometry,&geometry); GravityAdjustGeometry(new_images->columns,new_images->rows, new_images->gravity, &geometry); mask_image=RemoveFirstImageFromList(&_images); if (mask_image == (Image *) NULL) status&=CompositeImage(new_images,source_image,compose,clip_to_self, geometry.x,geometry.y,_exception); else { if ((compose == DisplaceCompositeOp) || (compose == DistortCompositeOp)) { status&=CompositeImage(source_image,mask_image, CopyGreenCompositeOp,MagickTrue,0,0,_exception); status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,_exception); } else { Image *clone_image; clone_image=CloneImage(new_images,0,0,MagickTrue,_exception); if (clone_image == (Image *) NULL) break; status&=CompositeImage(new_images,source_image,compose, clip_to_self,geometry.x,geometry.y,_exception); status&=CompositeImage(new_images,mask_image, CopyAlphaCompositeOp,MagickTrue,0,0,_exception); status&=CompositeImage(clone_image,new_images,OverCompositeOp, clip_to_self,0,0,_exception); new_images=DestroyImageList(new_images); new_images=clone_image; } mask_image=DestroyImage(mask_image); } source_image=DestroyImage(source_image); break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
*/ if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); if (IsGeometry(arg2) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); (void) ParsePageGeometry(_images,arg2,&geometry,_exception); offset.x=geometry.x; offset.y=geometry.y; source_image=_images; if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,arg1,&geometry,_exception); (void) CopyImagePixels(_images,source_image,&geometry,&offset, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { CLIWandWarnReplaced("-layer CompareAny"); (void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL); break; } if (LocaleCompare("delete",option+1) == 0) { if (IfNormalOp) DeleteImages(&_images,arg1,_exception); else DeleteImages(&_images,"-1",_exception); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (IfNormalOp) { const char *p; size_t number_duplicates; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option, arg1); number_duplicates=(size_t) StringToLong(arg1); p=strchr(arg1,','); if (p == (const char *) NULL) new_images=DuplicateImages(_images,number_duplicates,"-1", _exception); else new_images=DuplicateImages(_images,number_duplicates,p, _exception); } else new_images=DuplicateImages(_images,1,"-1",_exception); AppendImageToList(&_images, new_images); new_images=(Image *) NULL; break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1); if (parse < 0) CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator", option,arg1); new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse, _exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'f': { if (LocaleCompare("fft",option+1) == 0) { new_images=ForwardFourierTransformImage(_images,IsNormalOp, _exception); break; } if (LocaleCompare("flatten",option+1) == 0) { /* REDIRECTED to use -layers flatten instead */ (void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL); break; } if (LocaleCompare("fx",option+1) == 0) { new_images=FxImage(_images,arg1,_exception); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { /* FUTURE - make this a compose option (and thus layers compose ) or perhaps compose last image over all other _images. 
*/ Image *hald_image; new_images=RemoveFirstImageFromList(&_images); hald_image=RemoveLastImageFromList(&_images); if (hald_image == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); new_images=DestroyImage(new_images); status=MagickFalse; break; } (void) HaldClutImage(new_images,hald_image,_exception); hald_image=DestroyImage(hald_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *magnitude_image, *phase_image; magnitude_image=RemoveFirstImageFromList(&_images); phase_image=RemoveFirstImageFromList(&_images); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"ImageSequenceRequired","`%s'",option); magnitude_image=DestroyImage(magnitude_image); status=MagickFalse; break; } new_images=InverseFourierTransformImage(magnitude_image,phase_image, IsNormalOp,_exception); magnitude_image=DestroyImage(magnitude_image); phase_image=DestroyImage(phase_image); break; } if (LocaleCompare("insert",option+1) == 0) { Image *insert_image, *index_image; ssize_t index; if (IfNormalOp && (IsGeometry(arg1) == MagickFalse)) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); index=0; insert_image=RemoveLastImageFromList(&_images); if (IfNormalOp) index=(ssize_t) StringToLong(arg1); index_image=insert_image; if (index == 0) PrependImageToList(&_images,insert_image); else if (index == (ssize_t) GetImageListLength(_images)) AppendImageToList(&_images,insert_image); else { index_image=GetImageFromList(_images,index-1); if (index_image == (Image *) NULL) { insert_image=DestroyImage(insert_image); CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1); } InsertImageInList(&index_image,insert_image); } _images=GetFirstImageInList(index_image); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'l': { if (LocaleCompare("layers",option+1) == 0) { parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1); if ( parse < 0 ) CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod", option,arg1); switch ((LayerMethod) parse) { case CoalesceLayer: { new_images=CoalesceImages(_images,_exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { new_images=CompareImagesLayers(_images,(LayerMethod) parse, _exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { new_images=MergeImageLayers(_images,(LayerMethod) parse, _exception); break; } case DisposeLayer: { new_images=DisposeImages(_images,_exception); break; } case OptimizeImageLayer: { new_images=OptimizeImageLayers(_images,_exception); break; } case OptimizePlusLayer: { new_images=OptimizePlusImageLayers(_images,_exception); break; } case OptimizeTransLayer: { OptimizeImageTransparency(_images,_exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(&_images,_exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(&_images,_exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. 
*/ new_images=CoalesceImages(_images,_exception); if (new_images == (Image *) NULL) break; _images=DestroyImageList(_images); _images=OptimizeImageLayers(new_images,_exception); if (_images == (Image *) NULL) break; new_images=DestroyImageList(new_images); OptimizeImageTransparency(_images,_exception); (void) RemapImages(_quantize_info,_images,(Image *) NULL, _exception); break; } case CompositeLayer: { Image *source; RectangleInfo geometry; CompositeOperator compose; const char* value; value=GetImageOption(_image_info,"compose"); compose=OverCompositeOp; /* Default to Over */ if (value != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,value); /* Split image sequence at the first 'NULL:' image. */ source=_images; while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. */ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(_exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(_images,&geometry); (void) ParseAbsoluteGeometry(_images->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry(_images->page.width != 0 ? _images->page.width : _images->columns, _images->page.height != 0 ? _images->page.height : _images->rows,_images->gravity,&geometry); /* Compose the two image sequences together */ CompositeLayers(_images,compose,source,geometry.x,geometry.y, _exception); source=DestroyImageList(source); break; } } break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'm': { if (LocaleCompare("map",option+1) == 0) { CLIWandWarnReplaced("+remap"); (void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception); break; } if (LocaleCompare("metric",option+1) == 0) { (void) SetImageOption(_image_info,option+1,arg1); break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); morph_image=MorphImages(_images,StringToUnsignedLong(arg1), _exception); if (morph_image == (Image *) NULL) break; _images=DestroyImageList(_images); _images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { /* REDIRECTED to use -layers mosaic instead */ (void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'p': { if (LocaleCompare("poly",option+1) == 0) { double *args; ssize_t count; /* convert argument string into an array of doubles */ args = StringToArrayOfDoubles(arg1,&count,_exception); if (args == (double *) NULL ) CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1); new_images=PolynomialImage(_images,(size_t) (count >> 1),args, _exception); args=(double *) RelinquishMagickMemory(args); break; } if (LocaleCompare("process",option+1) == 0) { /* FUTURE: better parsing using ScriptToken() from string ??? 
*/ char **arguments; int j, number_arguments; arguments=StringToArgv(arg1,&number_arguments); if (arguments == (char **) NULL) break; if (strchr(arguments[1],'=') != (char *) NULL) { char breaker, quote, *token; const char *arguments; int next, status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg1". */ assert(arg1 != (const char *) NULL); length=strlen(arg1); token=(char *) NULL; if (~length >= (MagickPathExtent-1)) token=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; arguments=arg1; token_info=AcquireTokenInfo(); status=Tokenizer(token_info,0,token,length,arguments,"","=", "\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (status == 0) { const char *argv; argv=(&(arguments[next])); (void) InvokeDynamicImageFilter(token,&_images,1,&argv, _exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&_images, number_arguments-2,(const char **) arguments+2,_exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 'r': { if (LocaleCompare("remap",option+1) == 0) { (void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception); break; } if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(&_images); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } case 's': { if (LocaleCompare("smush",option+1) == 0) { /* FUTURE: this option needs more work to make better */ ssize_t offset; if (IsGeometry(arg1) == MagickFalse) CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); offset=(ssize_t) StringToLong(arg1); new_images=SmushImages(_images,IsNormalOp,offset,_exception); break; } if (LocaleCompare("subimage",option+1) == 0) { Image *base_image, *compare_image; const char *value; MetricType metric; double similarity; RectangleInfo offset; base_image=GetImageFromList(_images,0); compare_image=GetImageFromList(_images,1); /* Comparision Metric */ metric=UndefinedErrorMetric; value=GetImageOption(_image_info,"metric"); if (value != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,value); new_images=SimilarityImage(base_image,compare_image,metric,0.0, &offset,&similarity,_exception); if (new_images != (Image *) NULL) { char result[MagickPathExtent]; (void) FormatLocaleString(result,MagickPathExtent,"%lf", similarity); (void) SetImageProperty(new_images,"subimage:similarity",result, _exception); (void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long) offset.x); (void) SetImageProperty(new_images,"subimage:x",result, _exception); (void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long) offset.y); (void) SetImageProperty(new_images,"subimage:y",result, _exception); (void) FormatLocaleString(result,MagickPathExtent, "%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long) offset.height,(long) offset.x,(long) offset.y); (void) SetImageProperty(new_images,"subimage:offset",result, _exception); } break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *swap; ssize_t index, swap_index; index=(-1); swap_index=(-2); if (IfNormalOp) { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(arg1,&geometry_info); if ((flags & RhoValue) == 0) 
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(_images,index); q=GetImageFromList(_images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { if (IfNormalOp) CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1) else CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option); } if (p == q) CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1); swap=CloneImage(p,0,0,MagickTrue,_exception); if (swap == (Image *) NULL) CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed", option,GetExceptionMessage(errno)); ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception)); ReplaceImageInList(&q,swap); _images=GetFirstImageInList(q); break; } CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } default: CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option); } /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); /* if new image list generated, replace existing image list */ if (new_images == (Image *) NULL) return(status == 0 ? MagickFalse : MagickTrue); _images=DestroyImageList(_images); _images=GetFirstImageInList(new_images); return(status == 0 ? MagickFalse : MagickTrue); #undef _image_info #undef _images #undef _exception #undef _draw_info #undef _quantize_info #undef IfNormalOp #undef IfPlusOp #undef IsNormalOp } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I N o I m a g e O p e r a t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLINoImageOperator() Applies operations that may not actually need images % in an image list. % % The classic operators of this type is "-read", which actually creates % images even when no images are present. Or image stack operators, which % can be applied (push or pop) to an empty image list. % % Note that these operators may involve other special 'option' prefix % characters other than '-' or '+', namely parenthesis and braces. % % The format of the CLINoImageOption method is: % % void CLINoImageOption(MagickCLI *cli_wand,const char *option, % const char *arg1, const char *arg2) % % A description of each parameter follows: % % o cli_wand: the main CLI Wand to use. (sometimes not required) % % o option: The special option (with any switch char) to process % % o arg1 & arg2: Argument for option, if required % Currently arg2 is not used. % */ WandPrivate void CLINoImageOperator(MagickCLI *cli_wand, const char *option,const char *arg1n,const char *arg2n) { const char /* percent escaped versions of the args */ *arg1, *arg2; #define _image_info (cli_wand->wand.image_info) #define _images (cli_wand->wand.images) #define _exception (cli_wand->wand.exception) #define _process_flags (cli_wand->process_flags) #define _option_type ((CommandOptionFlags) cli_wand->command->flags) #define IfNormalOp (*option=='-') #define IfPlusOp (*option!='-') assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- NoImage Operator: %s \"%s\" \"%s\"", option, arg1n != (char *) NULL ? arg1n : "", arg2n != (char *) NULL ? 
arg2n : ""); arg1 = arg1n; arg2 = arg2n; /* Interpret Percent Escapes in Arguments - using first image */ if ( (((_process_flags & ProcessInterpretProperities) != 0 ) || ((_option_type & AlwaysInterpretArgsFlag) != 0) ) && ((_option_type & NeverInterpretArgsFlag) == 0) ) { /* Interpret Percent escapes in argument 1 */ if (arg1n != (char *) NULL) { arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg1=arg1n; /* use the given argument as is */ } } if (arg2n != (char *) NULL) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { CLIWandException(OptionWarning,"InterpretPropertyFailure",option); arg2=arg2n; /* use the given argument as is */ } } } #undef _process_flags #undef _option_type do { /* break to exit code */ /* No-op options (ignore these) */ if (LocaleCompare("noop",option+1) == 0) /* zero argument */ break; if (LocaleCompare("sans",option+1) == 0) /* one argument */ break; if (LocaleCompare("sans0",option+1) == 0) /* zero argument */ break; if (LocaleCompare("sans1",option+1) == 0) /* one argument */ break; if (LocaleCompare("sans2",option+1) == 0) /* two arguments */ break; /* Image Reading */ if ( ( LocaleCompare("read",option+1) == 0 ) || ( LocaleCompare("--",option) == 0 ) ) { /* Do Glob filename Expansion for 'arg1' then read all images. * * Expansion handles '@', '~', '*', and '?' meta-characters while ignoring * (but attaching to the filenames in the generated argument list) any * [...] read modifiers that may be present. * * For example: It will expand '*.gif[20x20]' into a list such as * 'abc.gif[20x20]', 'foobar.gif[20x20]', 'xyzzy.gif[20x20]' * * NOTE: In IMv6 this was done globally across all images. This * meant you could include IM options in '@filename' lists, but you * could not include comments. Doing it only for image read makes * it far more secure. * * Note: arguments do not have percent escapes expanded for security * reasons. */ int argc; char **argv; ssize_t i; argc = 1; argv = (char **) &arg1; /* Expand 'glob' expressions in the given filename. Expansion handles any 'coder:' prefix, or read modifiers attached to the filename, including them in the resulting expanded list. 
*/ if (ExpandFilenames(&argc,&argv) == MagickFalse) CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed", option,GetExceptionMessage(errno)); /* loop over expanded filename list, and read then all in */ for (i=0; i < (ssize_t) argc; i++) { Image * new_images; if (_image_info->ping != MagickFalse) new_images=PingImages(_image_info,argv[i],_exception); else new_images=ReadImages(_image_info,argv[i],_exception); AppendImageToList(&_images, new_images); argv[i]=DestroyString(argv[i]); } argv=(char **) RelinquishMagickMemory(argv); break; } /* Image Writing Note: Writing a empty image list is valid in specific cases */ if (LocaleCompare("write",option+1) == 0) { /* Note: arguments do not have percent escapes expanded */ char key[MagickPathExtent]; Image *write_images; ImageInfo *write_info; /* Need images, unless a "null:" output coder is used */ if ( _images == (Image *) NULL ) { if ( LocaleCompare(arg1,"null:") == 0 ) break; CLIWandExceptArgBreak(OptionError,"NoImagesForWrite",option,arg1); } (void) FormatLocaleString(key,MagickPathExtent,"cache:%s",arg1); (void) DeleteImageRegistry(key); write_images=_images; if (IfPlusOp) write_images=CloneImageList(_images,_exception); write_info=CloneImageInfo(_image_info); (void) WriteImages(write_info,write_images,arg1,_exception); write_info=DestroyImageInfo(write_info); if (IfPlusOp) write_images=DestroyImageList(write_images); break; } /* Parenthesis and Brace operations */ if (LocaleCompare("(",option) == 0) { /* stack 'push' images */ Stack *node; size_t size; size=0; node=cli_wand->image_list_stack; for ( ; node != (Stack *) NULL; node=node->next) size++; if ( size >= MAX_STACK_DEPTH ) CLIWandExceptionBreak(OptionError,"ParenthesisNestedTooDeeply",option); node=(Stack *) AcquireMagickMemory(sizeof(*node)); if (node == (Stack *) NULL) CLIWandExceptionBreak(ResourceLimitFatalError, "MemoryAllocationFailed",option); node->data = (void *)cli_wand->wand.images; node->next = cli_wand->image_list_stack; cli_wand->image_list_stack = node; cli_wand->wand.images = NewImageList(); /* handle respect-parenthesis */ if (IsStringTrue(GetImageOption(cli_wand->wand.image_info, "respect-parenthesis")) != MagickFalse) option="{"; /* fall-thru so as to push image settings too */ else break; /* fall thru to operation */ } if (LocaleCompare("{",option) == 0) { /* stack 'push' of image_info settings */ Stack *node; size_t size; size=0; node=cli_wand->image_info_stack; for ( ; node != (Stack *) NULL; node=node->next) size++; if ( size >= MAX_STACK_DEPTH ) CLIWandExceptionBreak(OptionError,"CurlyBracesNestedTooDeeply",option); node=(Stack *) AcquireMagickMemory(sizeof(*node)); if (node == (Stack *) NULL) CLIWandExceptionBreak(ResourceLimitFatalError, "MemoryAllocationFailed",option); node->data = (void *)cli_wand->wand.image_info; node->next = cli_wand->image_info_stack; cli_wand->image_info_stack = node; cli_wand->wand.image_info = CloneImageInfo(cli_wand->wand.image_info); if (cli_wand->wand.image_info == (ImageInfo *) NULL) { CLIWandException(ResourceLimitFatalError,"MemoryAllocationFailed", option); cli_wand->wand.image_info = (ImageInfo *)node->data; node = (Stack *)RelinquishMagickMemory(node); break; } break; } if (LocaleCompare(")",option) == 0) { /* pop images from stack */ Stack *node; node = (Stack *)cli_wand->image_list_stack; if ( node == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnbalancedParenthesis",option); cli_wand->image_list_stack = node->next; AppendImageToList((Image **)&node->data,cli_wand->wand.images); cli_wand->wand.images= 
(Image *)node->data; node = (Stack *)RelinquishMagickMemory(node); /* handle respect-parenthesis - of the previous 'pushed' settings */ node = cli_wand->image_info_stack; if ( node != (Stack *) NULL) { if (IsStringTrue(GetImageOption( cli_wand->wand.image_info,"respect-parenthesis")) != MagickFalse) option="}"; /* fall-thru so as to pop image settings too */ else break; } else break; /* fall thru to next if */ } if (LocaleCompare("}",option) == 0) { /* pop image_info settings from stack */ Stack *node; node = (Stack *)cli_wand->image_info_stack; if ( node == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnbalancedCurlyBraces",option); cli_wand->image_info_stack = node->next; (void) DestroyImageInfo(cli_wand->wand.image_info); cli_wand->wand.image_info = (ImageInfo *)node->data; node = (Stack *)RelinquishMagickMemory(node); GetDrawInfo(cli_wand->wand.image_info, cli_wand->draw_info); cli_wand->quantize_info=DestroyQuantizeInfo(cli_wand->quantize_info); cli_wand->quantize_info=AcquireQuantizeInfo(cli_wand->wand.image_info); break; } if (LocaleCompare("print",option+1) == 0) { (void) FormatLocaleFile(stdout,"%s",arg1); break; } if (LocaleCompare("set",option+1) == 0) { /* Settings are applied to each image in memory in turn (if any). While a option: only need to be applied once globally. NOTE: rguments have not been automatically percent expaneded */ /* escape the 'key' once only, using first image. */ arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception); if (arg1 == (char *) NULL) CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); if (LocaleNCompare(arg1,"registry:",9) == 0) { if (IfPlusOp) { (void) DeleteImageRegistry(arg1+9); arg1=DestroyString((char *)arg1); break; } arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) { arg1=DestroyString((char *)arg1); CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure", option); } (void) SetImageRegistry(StringRegistryType,arg1+9,arg2,_exception); arg1=DestroyString((char *)arg1); arg2=DestroyString((char *)arg2); break; } if (LocaleNCompare(arg1,"option:",7) == 0) { /* delete equivelent artifact from all images (if any) */ if (_images != (Image *) NULL) { MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) DeleteImageArtifact(_images,arg1+7); MagickResetIterator(&cli_wand->wand); } /* now set/delete the global option as needed */ /* FUTURE: make escapes in a global 'option:' delayed */ arg2=(char *) NULL; if (IfNormalOp) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) CLIWandExceptionBreak(OptionWarning, "InterpretPropertyFailure",option); } (void) SetImageOption(_image_info,arg1+7,arg2); arg1=DestroyString((char *)arg1); arg2=DestroyString((char *)arg2); break; } /* Set Artifacts/Properties/Attributes all images (required) */ if ( _images == (Image *) NULL ) CLIWandExceptArgBreak(OptionWarning,"NoImageForProperty",option,arg1); MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) { arg2=(char *) NULL; if (IfNormalOp) { arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception); if (arg2 == (char *) NULL) CLIWandExceptionBreak(OptionWarning, "InterpretPropertyFailure",option); } if (LocaleNCompare(arg1,"artifact:",9) == 0) (void) SetImageArtifact(_images,arg1+9,arg2); else if (LocaleNCompare(arg1,"property:",9) == 0) (void) SetImageProperty(_images,arg1+9,arg2,_exception); else (void) 
SetImageProperty(_images,arg1,arg2,_exception); arg2=DestroyString((char *)arg2); } MagickResetIterator(&cli_wand->wand); arg1=DestroyString((char *)arg1); break; } if (LocaleCompare("clone",option+1) == 0) { Image *new_images; if (*option == '+') arg1=AcquireString("-1"); if (IsSceneGeometry(arg1,MagickFalse) == MagickFalse) CLIWandExceptionBreak(OptionError,"InvalidArgument",option); if ( cli_wand->image_list_stack == (Stack *) NULL) CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option); new_images = (Image *)cli_wand->image_list_stack->data; if (new_images == (Image *) NULL) CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option); new_images=CloneImages(new_images,arg1,_exception); if (new_images == (Image *) NULL) CLIWandExceptionBreak(OptionError,"NoSuchImage",option); AppendImageToList(&_images,new_images); break; } /* Informational Operations. Note that these do not require either a cli-wand or images! Though currently a cli-wand much be provided regardless. */ if (LocaleCompare("version",option+1) == 0) { ListMagickVersion(stdout); break; } if (LocaleCompare("list",option+1) == 0) { /* FUTURE: This 'switch' should really be part of MagickCore */ ssize_t list; list=ParseCommandOption(MagickListOptions,MagickFalse,arg1); if ( list < 0 ) { CLIWandExceptionArg(OptionError,"UnrecognizedListType",option,arg1); break; } switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,_exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,_exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,_exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,_exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,_exception); break; } case MagickFormatOptions: (void) ListMagickInfo((FILE *) NULL,_exception); break; case MagickLocaleOptions: (void) ListLocaleInfo((FILE *) NULL,_exception); break; case MagickLogOptions: (void) ListLogInfo((FILE *) NULL,_exception); break; case MagickMagicOptions: (void) ListMagicInfo((FILE *) NULL,_exception); break; case MagickMimeOptions: (void) ListMimeInfo((FILE *) NULL,_exception); break; case MagickModuleOptions: (void) ListModuleInfo((FILE *) NULL,_exception); break; case MagickPolicyOptions: (void) ListPolicyInfo((FILE *) NULL,_exception); break; case MagickResourceOptions: (void) ListMagickResourceInfo((FILE *) NULL,_exception); break; case MagickThresholdOptions: (void) ListThresholdMaps((FILE *) NULL,_exception); break; default: (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, _exception); break; } break; } CLIWandException(OptionError,"UnrecognizedOption",option); DisableMSCWarning(4127) } while (0); /* break to exit code. */ RestoreMSCWarning /* clean up percent escape interpreted strings */ if (arg1 != arg1n ) arg1=DestroyString((char *)arg1); if (arg2 != arg2n ) arg2=DestroyString((char *)arg2); #undef _image_info #undef _images #undef _exception #undef IfNormalOp #undef IfPlusOp } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C L I O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLIOption() Processes the given option using the given CLI Magick Wand. % The option arguments can be variable in number, though at this time no more % that two is actually used by any option (this may change). Excess options % are simply ignored. 
% % If the cli_wand->command pointer is non-null, then it is assumed that the % option has already been looked up from the CommandOptions[] table in % "MagickCore/options.c" using GetCommandOptionInfo(). If not set this % routine will do the lookup instead. The pointer is reset afterward. % % This action allows the caller to look up and pre-handle any 'special' % options (such as implicit reads) before calling this general option % handler to deal with 'standard' command line options. % % The format of the CLIOption method is: % % void CLIOption(MagickCLI *cli_wand,const char *option, ...) % % A description of each parameter follows: % % o cli_wand: the main CLI Wand to use. % % o option: The special option (with any switch char) to process % % o args: any required arguments for an option (variable number) % % Example Usage... % % CLIOption(cli_wand,"-read","rose:"); % CLIOption(cli_wand,"-virtual-pixel","transparent"); % CLIOption(cli_wand,"-distort","SRT:","30"); % CLIOption(cli_wand,"-write","rotated_rose.png"); % */ WandExport void CLIOption(MagickCLI *cli_wand,const char *option,...) { const char /* extracted option args from args */ *arg1, *arg2; CommandOptionFlags option_type; assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); do { /* Break Code Block for error handling */ /* get information about option */ if ( cli_wand->command == (const OptionInfo *) NULL ) cli_wand->command = GetCommandOptionInfo(option); #if 0 (void) FormatLocaleFile(stderr, "CLIOption \"%s\" matched \"%s\"\n", option, cli_wand->command->mnemonic ); #endif option_type=(CommandOptionFlags) cli_wand->command->flags; if ( option_type == UndefinedOptionFlag ) CLIWandExceptionReturn(OptionFatalError,"UnrecognizedOption",option); assert( LocaleCompare(cli_wand->command->mnemonic,option) == 0 ); /* deprecated options */ if ( (option_type & DeprecateOptionFlag) != 0 ) CLIWandExceptionBreak(OptionError,"DeprecatedOptionNoCode",option); /* options that this module does not handle */ if ((option_type & (SpecialOptionFlag|GenesisOptionFlag)) != 0 ) CLIWandExceptionBreak(OptionFatalError,"InvalidUseOfOption",option); /* Get argument strings from VarArgs How can you determine if enough arguments were supplied? What happens if not enough arguments were supplied?
*/ { size_t count = (size_t) cli_wand->command->type; va_list operands; va_start(operands,option); arg1=arg2=NULL; if ( count >= 1 ) arg1=(const char *) va_arg(operands, const char *); if ( count >= 2 ) arg2=(const char *) va_arg(operands, const char *); va_end(operands); #if 0 (void) FormatLocaleFile(stderr, "CLIOption: \"%s\" Count: %ld Flags: %04x Args: \"%s\" \"%s\"\n", option,(long) count,option_type,arg1,arg2); #endif } /* Call the appropriate option handler */ /* FUTURE: this is temporary - get 'settings' to handle distribution of settings to images attributes,proprieties,artifacts */ if ( cli_wand->wand.images != (Image *) NULL ) (void) SyncImagesSettings(cli_wand->wand.image_info,cli_wand->wand.images, cli_wand->wand.exception); if ( (option_type & SettingOptionFlags) != 0 ) { CLISettingOptionInfo(cli_wand, option, arg1, arg2); /* FUTURE: Sync Specific Settings into Image Properities (not global) */ } /* Operators that do not need images - read, write, stack, clone */ if ((option_type & NoImageOperatorFlag) != 0) CLINoImageOperator(cli_wand, option, arg1, arg2); /* FUTURE: The not a setting part below is a temporary hack due to * some options being both a Setting and a Simple operator. * Specifically -monitor, -depth, and -colorspace */ if ( cli_wand->wand.images == (Image *) NULL ) if ( ((option_type & (SimpleOperatorFlag|ListOperatorFlag)) != 0 ) && ((option_type & SettingOptionFlags) == 0 )) /* temp hack */ CLIWandExceptionBreak(OptionError,"NoImagesFound",option); /* Operators which loop of individual images, simply */ if ( (option_type & SimpleOperatorFlag) != 0 && cli_wand->wand.images != (Image *) NULL) /* temp hack */ { ExceptionInfo *exception=AcquireExceptionInfo(); (void) CLISimpleOperatorImages(cli_wand, option, arg1, arg2,exception); exception=DestroyExceptionInfo(exception); } /* Operators that work on the image list as a whole */ if ( (option_type & ListOperatorFlag) != 0 ) (void) CLIListOperatorImages(cli_wand, option, arg1, arg2); DisableMSCWarning(4127) } while (0); /* end Break code block */ RestoreMSCWarning cli_wand->command = (const OptionInfo *) NULL; /* prevent re-use later */ }
./CrossVul/dataset_final_sorted/CWE-399/c/good_946_1
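The CLIOption() documentation in the MagickWand dispatch code above lists its calling convention only as bare example lines. The following is a minimal, hedged driver that strings those same four calls together as a standalone program; the bootstrap and teardown names (MagickWandGenesis, AcquireMagickCLI, DestroyMagickCLI, MagickWandTerminus) and the header path are assumptions about the wider MagickWand API rather than anything established by the file above, so treat it as a sketch of how the dispatcher is driven, not the project's own usage.

/*
  Hedged usage sketch. The four CLIOption() calls are taken from the
  "Example Usage" notes above; AcquireMagickCLI()/DestroyMagickCLI() and
  <MagickWand/MagickWand.h> are assumed entry points and may differ in a
  real build.
*/
#include <MagickWand/MagickWand.h>

int main(void)
{
  MagickCLI
    *cli_wand;

  MagickWandGenesis();
  cli_wand=AcquireMagickCLI((ImageInfo *) NULL,(ExceptionInfo *) NULL);
  CLIOption(cli_wand,"-read","rose:");              /* implicit read */
  CLIOption(cli_wand,"-virtual-pixel","transparent");
  CLIOption(cli_wand,"-distort","SRT:","30");       /* two-argument option */
  CLIOption(cli_wand,"-write","rotated_rose.png");
  cli_wand=DestroyMagickCLI(cli_wand);
  MagickWandTerminus();
  return(0);
}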
crossvul-cpp_data_good_5343_0
/* * Copyright (C) 2011 Grigori Goronzy <greg@chown.ath.cx> * * This file is part of libass. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "config.h" #include "ass_compat.h" #include "ass_shaper.h" #include "ass_render.h" #include "ass_font.h" #include "ass_parse.h" #include "ass_cache.h" #include <limits.h> #include <stdbool.h> #ifdef CONFIG_HARFBUZZ #include <hb-ft.h> enum { VERT = 0, VKNA, KERN, LIGA, CLIG }; #define NUM_FEATURES 5 #endif struct ass_shaper { ASS_ShapingLevel shaping_level; // FriBidi log2vis int n_glyphs; FriBidiChar *event_text; FriBidiCharType *ctypes; FriBidiLevel *emblevels; FriBidiStrIndex *cmap; FriBidiParType base_direction; #ifdef CONFIG_HARFBUZZ // OpenType features int n_features; hb_feature_t *features; hb_language_t language; // Glyph metrics cache, to speed up shaping Cache *metrics_cache; #endif }; #ifdef CONFIG_HARFBUZZ struct ass_shaper_metrics_data { Cache *metrics_cache; GlyphMetricsHashKey hash_key; int vertical; }; struct ass_shaper_font_data { hb_font_t *fonts[ASS_FONT_MAX_FACES]; hb_font_funcs_t *font_funcs[ASS_FONT_MAX_FACES]; struct ass_shaper_metrics_data *metrics_data[ASS_FONT_MAX_FACES]; }; #endif /** * \brief Print version information */ void ass_shaper_info(ASS_Library *lib) { ass_msg(lib, MSGL_INFO, "Shaper: FriBidi " FRIBIDI_VERSION " (SIMPLE)" #ifdef CONFIG_HARFBUZZ " HarfBuzz-ng %s (COMPLEX)", hb_version_string() #endif ); } /** * \brief grow arrays, if needed * \param new_size requested size */ static bool check_allocations(ASS_Shaper *shaper, size_t new_size) { if (new_size > shaper->n_glyphs) { if (!ASS_REALLOC_ARRAY(shaper->event_text, new_size) || !ASS_REALLOC_ARRAY(shaper->ctypes, new_size) || !ASS_REALLOC_ARRAY(shaper->emblevels, new_size) || !ASS_REALLOC_ARRAY(shaper->cmap, new_size)) return false; shaper->n_glyphs = new_size; } return true; } /** * \brief Free shaper and related data */ void ass_shaper_free(ASS_Shaper *shaper) { #ifdef CONFIG_HARFBUZZ ass_cache_done(shaper->metrics_cache); free(shaper->features); #endif free(shaper->event_text); free(shaper->ctypes); free(shaper->emblevels); free(shaper->cmap); free(shaper); } void ass_shaper_font_data_free(ASS_ShaperFontData *priv) { #ifdef CONFIG_HARFBUZZ int i; for (i = 0; i < ASS_FONT_MAX_FACES; i++) if (priv->fonts[i]) { free(priv->metrics_data[i]); hb_font_destroy(priv->fonts[i]); hb_font_funcs_destroy(priv->font_funcs[i]); } free(priv); #endif } #ifdef CONFIG_HARFBUZZ /** * \brief set up the HarfBuzz OpenType feature list with some * standard features. 
*/ static bool init_features(ASS_Shaper *shaper) { shaper->features = calloc(sizeof(hb_feature_t), NUM_FEATURES); if (!shaper->features) return false; shaper->n_features = NUM_FEATURES; shaper->features[VERT].tag = HB_TAG('v', 'e', 'r', 't'); shaper->features[VERT].end = UINT_MAX; shaper->features[VKNA].tag = HB_TAG('v', 'k', 'n', 'a'); shaper->features[VKNA].end = UINT_MAX; shaper->features[KERN].tag = HB_TAG('k', 'e', 'r', 'n'); shaper->features[KERN].end = UINT_MAX; shaper->features[LIGA].tag = HB_TAG('l', 'i', 'g', 'a'); shaper->features[LIGA].end = UINT_MAX; shaper->features[CLIG].tag = HB_TAG('c', 'l', 'i', 'g'); shaper->features[CLIG].end = UINT_MAX; return true; } /** * \brief Set features depending on properties of the run */ static void set_run_features(ASS_Shaper *shaper, GlyphInfo *info) { // enable vertical substitutions for @font runs if (info->font->desc.vertical) shaper->features[VERT].value = shaper->features[VKNA].value = 1; else shaper->features[VERT].value = shaper->features[VKNA].value = 0; // disable ligatures if horizontal spacing is non-standard if (info->hspacing) shaper->features[LIGA].value = shaper->features[CLIG].value = 0; else shaper->features[LIGA].value = shaper->features[CLIG].value = 1; } /** * \brief Update HarfBuzz's idea of font metrics * \param hb_font HarfBuzz font * \param face associated FreeType font face */ static void update_hb_size(hb_font_t *hb_font, FT_Face face) { hb_font_set_scale (hb_font, ((uint64_t) face->size->metrics.x_scale * (uint64_t) face->units_per_EM) >> 16, ((uint64_t) face->size->metrics.y_scale * (uint64_t) face->units_per_EM) >> 16); hb_font_set_ppem (hb_font, face->size->metrics.x_ppem, face->size->metrics.y_ppem); } /* * Cached glyph metrics getters follow * * These functions replace HarfBuzz' standard FreeType font functions * and provide cached access to essential glyph metrics. This usually * speeds up shaping a lot. It also allows us to use custom load flags. 
* */ GlyphMetricsHashValue * get_cached_metrics(struct ass_shaper_metrics_data *metrics, FT_Face face, hb_codepoint_t unicode, hb_codepoint_t glyph) { GlyphMetricsHashValue *val; metrics->hash_key.glyph_index = glyph; if (ass_cache_get(metrics->metrics_cache, &metrics->hash_key, &val)) { if (val->metrics.width >= 0) return val; ass_cache_dec_ref(val); return NULL; } if (!val) return NULL; int load_flags = FT_LOAD_DEFAULT | FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH | FT_LOAD_IGNORE_TRANSFORM; if (FT_Load_Glyph(face, glyph, load_flags)) { val->metrics.width = -1; ass_cache_commit(val, 1); ass_cache_dec_ref(val); return NULL; } memcpy(&val->metrics, &face->glyph->metrics, sizeof(FT_Glyph_Metrics)); // if @font rendering is enabled and the glyph should be rotated, // make cached_h_advance pick up the right advance later if (metrics->vertical && unicode >= VERTICAL_LOWER_BOUND) val->metrics.horiAdvance = val->metrics.vertAdvance; ass_cache_commit(val, 1); return val; } static hb_bool_t get_glyph(hb_font_t *font, void *font_data, hb_codepoint_t unicode, hb_codepoint_t variation, hb_codepoint_t *glyph, void *user_data) { FT_Face face = font_data; struct ass_shaper_metrics_data *metrics_priv = user_data; if (variation) *glyph = FT_Face_GetCharVariantIndex(face, ass_font_index_magic(face, unicode), variation); else *glyph = FT_Get_Char_Index(face, ass_font_index_magic(face, unicode)); if (!*glyph) return false; // rotate glyph advances for @fonts while we still know the Unicode codepoints GlyphMetricsHashValue *metrics = get_cached_metrics(metrics_priv, face, unicode, *glyph); ass_cache_dec_ref(metrics); return true; } static hb_position_t cached_h_advance(hb_font_t *font, void *font_data, hb_codepoint_t glyph, void *user_data) { FT_Face face = font_data; struct ass_shaper_metrics_data *metrics_priv = user_data; GlyphMetricsHashValue *metrics = get_cached_metrics(metrics_priv, face, 0, glyph); if (!metrics) return 0; hb_position_t advance = metrics->metrics.horiAdvance; ass_cache_dec_ref(metrics); return advance; } static hb_position_t cached_v_advance(hb_font_t *font, void *font_data, hb_codepoint_t glyph, void *user_data) { FT_Face face = font_data; struct ass_shaper_metrics_data *metrics_priv = user_data; GlyphMetricsHashValue *metrics = get_cached_metrics(metrics_priv, face, 0, glyph); if (!metrics) return 0; hb_position_t advance = metrics->metrics.vertAdvance; ass_cache_dec_ref(metrics); return advance; } static hb_bool_t cached_h_origin(hb_font_t *font, void *font_data, hb_codepoint_t glyph, hb_position_t *x, hb_position_t *y, void *user_data) { return true; } static hb_bool_t cached_v_origin(hb_font_t *font, void *font_data, hb_codepoint_t glyph, hb_position_t *x, hb_position_t *y, void *user_data) { FT_Face face = font_data; struct ass_shaper_metrics_data *metrics_priv = user_data; GlyphMetricsHashValue *metrics = get_cached_metrics(metrics_priv, face, 0, glyph); if (!metrics) return false; *x = metrics->metrics.horiBearingX - metrics->metrics.vertBearingX; *y = metrics->metrics.horiBearingY - (-metrics->metrics.vertBearingY); ass_cache_dec_ref(metrics); return true; } static hb_position_t get_h_kerning(hb_font_t *font, void *font_data, hb_codepoint_t first, hb_codepoint_t second, void *user_data) { FT_Face face = font_data; FT_Vector kern; if (FT_Get_Kerning(face, first, second, FT_KERNING_DEFAULT, &kern)) return 0; return kern.x; } static hb_position_t get_v_kerning(hb_font_t *font, void *font_data, hb_codepoint_t first, hb_codepoint_t second, void *user_data) { return 0; } static hb_bool_t 
cached_extents(hb_font_t *font, void *font_data, hb_codepoint_t glyph, hb_glyph_extents_t *extents, void *user_data) { FT_Face face = font_data; struct ass_shaper_metrics_data *metrics_priv = user_data; GlyphMetricsHashValue *metrics = get_cached_metrics(metrics_priv, face, 0, glyph); if (!metrics) return false; extents->x_bearing = metrics->metrics.horiBearingX; extents->y_bearing = metrics->metrics.horiBearingY; extents->width = metrics->metrics.width; extents->height = -metrics->metrics.height; ass_cache_dec_ref(metrics); return true; } static hb_bool_t get_contour_point(hb_font_t *font, void *font_data, hb_codepoint_t glyph, unsigned int point_index, hb_position_t *x, hb_position_t *y, void *user_data) { FT_Face face = font_data; int load_flags = FT_LOAD_DEFAULT | FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH | FT_LOAD_IGNORE_TRANSFORM; if (FT_Load_Glyph(face, glyph, load_flags)) return false; if (point_index >= (unsigned)face->glyph->outline.n_points) return false; *x = face->glyph->outline.points[point_index].x; *y = face->glyph->outline.points[point_index].y; return true; } /** * \brief Retrieve HarfBuzz font from cache. * Create it from FreeType font, if needed. * \param info glyph cluster * \return HarfBuzz font */ static hb_font_t *get_hb_font(ASS_Shaper *shaper, GlyphInfo *info) { ASS_Font *font = info->font; hb_font_t **hb_fonts; if (!font->shaper_priv) font->shaper_priv = calloc(sizeof(ASS_ShaperFontData), 1); hb_fonts = font->shaper_priv->fonts; if (!hb_fonts[info->face_index]) { hb_fonts[info->face_index] = hb_ft_font_create(font->faces[info->face_index], NULL); // set up cached metrics access font->shaper_priv->metrics_data[info->face_index] = calloc(sizeof(struct ass_shaper_metrics_data), 1); struct ass_shaper_metrics_data *metrics = font->shaper_priv->metrics_data[info->face_index]; metrics->metrics_cache = shaper->metrics_cache; metrics->vertical = info->font->desc.vertical; hb_font_funcs_t *funcs = hb_font_funcs_create(); font->shaper_priv->font_funcs[info->face_index] = funcs; hb_font_funcs_set_glyph_func(funcs, get_glyph, metrics, NULL); hb_font_funcs_set_glyph_h_advance_func(funcs, cached_h_advance, metrics, NULL); hb_font_funcs_set_glyph_v_advance_func(funcs, cached_v_advance, metrics, NULL); hb_font_funcs_set_glyph_h_origin_func(funcs, cached_h_origin, metrics, NULL); hb_font_funcs_set_glyph_v_origin_func(funcs, cached_v_origin, metrics, NULL); hb_font_funcs_set_glyph_h_kerning_func(funcs, get_h_kerning, metrics, NULL); hb_font_funcs_set_glyph_v_kerning_func(funcs, get_v_kerning, metrics, NULL); hb_font_funcs_set_glyph_extents_func(funcs, cached_extents, metrics, NULL); hb_font_funcs_set_glyph_contour_point_func(funcs, get_contour_point, metrics, NULL); hb_font_set_funcs(hb_fonts[info->face_index], funcs, font->faces[info->face_index], NULL); } ass_face_set_size(font->faces[info->face_index], info->font_size); update_hb_size(hb_fonts[info->face_index], font->faces[info->face_index]); // update hash key for cached metrics struct ass_shaper_metrics_data *metrics = font->shaper_priv->metrics_data[info->face_index]; metrics->hash_key.font = info->font; metrics->hash_key.face_index = info->face_index; metrics->hash_key.size = info->font_size; metrics->hash_key.scale_x = double_to_d6(info->scale_x); metrics->hash_key.scale_y = double_to_d6(info->scale_y); return hb_fonts[info->face_index]; } /** * \brief Map script to default language. * * This maps a script to a language, if a script has a representative * language it is typically used with. 
Otherwise, the invalid language * is returned. * * The mapping is similar to Pango's pango-language.c. * * \param script script tag * \return language tag */ static hb_language_t script_to_language(hb_script_t script) { switch (script) { // Unicode 1.1 case HB_SCRIPT_ARABIC: return hb_language_from_string("ar", -1); break; case HB_SCRIPT_ARMENIAN: return hb_language_from_string("hy", -1); break; case HB_SCRIPT_BENGALI: return hb_language_from_string("bn", -1); break; case HB_SCRIPT_CANADIAN_ABORIGINAL: return hb_language_from_string("iu", -1); break; case HB_SCRIPT_CHEROKEE: return hb_language_from_string("chr", -1); break; case HB_SCRIPT_COPTIC: return hb_language_from_string("cop", -1); break; case HB_SCRIPT_CYRILLIC: return hb_language_from_string("ru", -1); break; case HB_SCRIPT_DEVANAGARI: return hb_language_from_string("hi", -1); break; case HB_SCRIPT_GEORGIAN: return hb_language_from_string("ka", -1); break; case HB_SCRIPT_GREEK: return hb_language_from_string("el", -1); break; case HB_SCRIPT_GUJARATI: return hb_language_from_string("gu", -1); break; case HB_SCRIPT_GURMUKHI: return hb_language_from_string("pa", -1); break; case HB_SCRIPT_HANGUL: return hb_language_from_string("ko", -1); break; case HB_SCRIPT_HEBREW: return hb_language_from_string("he", -1); break; case HB_SCRIPT_HIRAGANA: return hb_language_from_string("ja", -1); break; case HB_SCRIPT_KANNADA: return hb_language_from_string("kn", -1); break; case HB_SCRIPT_KATAKANA: return hb_language_from_string("ja", -1); break; case HB_SCRIPT_LAO: return hb_language_from_string("lo", -1); break; case HB_SCRIPT_LATIN: return hb_language_from_string("en", -1); break; case HB_SCRIPT_MALAYALAM: return hb_language_from_string("ml", -1); break; case HB_SCRIPT_MONGOLIAN: return hb_language_from_string("mn", -1); break; case HB_SCRIPT_ORIYA: return hb_language_from_string("or", -1); break; case HB_SCRIPT_SYRIAC: return hb_language_from_string("syr", -1); break; case HB_SCRIPT_TAMIL: return hb_language_from_string("ta", -1); break; case HB_SCRIPT_TELUGU: return hb_language_from_string("te", -1); break; case HB_SCRIPT_THAI: return hb_language_from_string("th", -1); break; // Unicode 2.0 case HB_SCRIPT_TIBETAN: return hb_language_from_string("bo", -1); break; // Unicode 3.0 case HB_SCRIPT_ETHIOPIC: return hb_language_from_string("am", -1); break; case HB_SCRIPT_KHMER: return hb_language_from_string("km", -1); break; case HB_SCRIPT_MYANMAR: return hb_language_from_string("my", -1); break; case HB_SCRIPT_SINHALA: return hb_language_from_string("si", -1); break; case HB_SCRIPT_THAANA: return hb_language_from_string("dv", -1); break; // Unicode 3.2 case HB_SCRIPT_BUHID: return hb_language_from_string("bku", -1); break; case HB_SCRIPT_HANUNOO: return hb_language_from_string("hnn", -1); break; case HB_SCRIPT_TAGALOG: return hb_language_from_string("tl", -1); break; case HB_SCRIPT_TAGBANWA: return hb_language_from_string("tbw", -1); break; // Unicode 4.0 case HB_SCRIPT_UGARITIC: return hb_language_from_string("uga", -1); break; // Unicode 4.1 case HB_SCRIPT_BUGINESE: return hb_language_from_string("bug", -1); break; case HB_SCRIPT_OLD_PERSIAN: return hb_language_from_string("peo", -1); break; case HB_SCRIPT_SYLOTI_NAGRI: return hb_language_from_string("syl", -1); break; // Unicode 5.0 case HB_SCRIPT_NKO: return hb_language_from_string("nko", -1); break; // no representative language exists default: return HB_LANGUAGE_INVALID; break; } } /** * \brief Determine language to be used for shaping a run. 
* * \param shaper shaper instance * \param script script tag associated with run * \return language tag */ static hb_language_t hb_shaper_get_run_language(ASS_Shaper *shaper, hb_script_t script) { hb_language_t lang; // override set, use it if (shaper->language != HB_LANGUAGE_INVALID) return shaper->language; // get default language for given script lang = script_to_language(script); // no dice, use system default if (lang == HB_LANGUAGE_INVALID) lang = hb_language_get_default(); return lang; } /** * \brief Feed a run of shaped characters into the GlyphInfo array. * * \param glyphs GlyphInfo array * \param buf buffer of shaped run * \param offset offset into GlyphInfo array */ static void shape_harfbuzz_process_run(GlyphInfo *glyphs, hb_buffer_t *buf, int offset) { int j; int num_glyphs = hb_buffer_get_length(buf); hb_glyph_info_t *glyph_info = hb_buffer_get_glyph_infos(buf, NULL); hb_glyph_position_t *pos = hb_buffer_get_glyph_positions(buf, NULL); for (j = 0; j < num_glyphs; j++) { unsigned idx = glyph_info[j].cluster + offset; GlyphInfo *info = glyphs + idx; GlyphInfo *root = info; // if we have more than one glyph per cluster, allocate a new one // and attach to the root glyph if (info->skip == 0) { while (info->next) info = info->next; info->next = malloc(sizeof(GlyphInfo)); if (info->next) { memcpy(info->next, info, sizeof(GlyphInfo)); ass_cache_inc_ref(info->font); info = info->next; info->next = NULL; } } // set position and advance info->skip = 0; info->glyph_index = glyph_info[j].codepoint; info->offset.x = pos[j].x_offset * info->scale_x; info->offset.y = -pos[j].y_offset * info->scale_y; info->advance.x = pos[j].x_advance * info->scale_x; info->advance.y = -pos[j].y_advance * info->scale_y; // accumulate advance in the root glyph root->cluster_advance.x += info->advance.x; root->cluster_advance.y += info->advance.y; } } /** * \brief Shape event text with HarfBuzz. Full OpenType shaping. * \param glyphs glyph clusters * \param len number of clusters */ static void shape_harfbuzz(ASS_Shaper *shaper, GlyphInfo *glyphs, size_t len) { int i; hb_buffer_t *buf = hb_buffer_create(); hb_segment_properties_t props = HB_SEGMENT_PROPERTIES_DEFAULT; // Initialize: skip all glyphs, this is undone later as needed for (i = 0; i < len; i++) glyphs[i].skip = 1; for (i = 0; i < len; i++) { int offset = i; hb_font_t *font = get_hb_font(shaper, glyphs + offset); int level = glyphs[offset].shape_run_id; int direction = shaper->emblevels[offset] % 2; // advance in text until end of run while (i < (len - 1) && level == glyphs[i+1].shape_run_id) i++; hb_buffer_pre_allocate(buf, i - offset + 1); hb_buffer_add_utf32(buf, shaper->event_text + offset, i - offset + 1, 0, i - offset + 1); props.direction = direction ? HB_DIRECTION_RTL : HB_DIRECTION_LTR; props.script = glyphs[offset].script; props.language = hb_shaper_get_run_language(shaper, props.script); hb_buffer_set_segment_properties(buf, &props); set_run_features(shaper, glyphs + offset); hb_shape(font, buf, shaper->features, shaper->n_features); shape_harfbuzz_process_run(glyphs, buf, offset); hb_buffer_reset(buf); } hb_buffer_destroy(buf); } /** * \brief Determine script property of all characters. Characters of script * common and inherited get their script from their context. 
* */ void ass_shaper_determine_script(ASS_Shaper *shaper, GlyphInfo *glyphs, size_t len) { int i; int backwards_scan = 0; hb_unicode_funcs_t *ufuncs = hb_unicode_funcs_get_default(); hb_script_t last_script = HB_SCRIPT_UNKNOWN; // determine script (forward scan) for (i = 0; i < len; i++) { GlyphInfo *info = glyphs + i; info->script = hb_unicode_script(ufuncs, info->symbol); // common/inherit codepoints inherit script from context if (info->script == HB_SCRIPT_COMMON || info->script == HB_SCRIPT_INHERITED) { // unknown is not a valid context if (last_script != HB_SCRIPT_UNKNOWN) info->script = last_script; else // do a backwards scan to check if next codepoint // contains a valid script for context backwards_scan = 1; } else { last_script = info->script; } } // determine script (backwards scan, if needed) last_script = HB_SCRIPT_UNKNOWN; for (i = len - 1; i >= 0 && backwards_scan; i--) { GlyphInfo *info = glyphs + i; // common/inherit codepoints inherit script from context if (info->script == HB_SCRIPT_COMMON || info->script == HB_SCRIPT_INHERITED) { // unknown script is not a valid context if (last_script != HB_SCRIPT_UNKNOWN) info->script = last_script; } else { last_script = info->script; } } } #endif /** * \brief Shape event text with FriBidi. Does mirroring and simple * Arabic shaping. * \param len number of clusters */ static void shape_fribidi(ASS_Shaper *shaper, GlyphInfo *glyphs, size_t len) { int i; FriBidiJoiningType *joins = calloc(sizeof(*joins), len); // shape on codepoint level fribidi_get_joining_types(shaper->event_text, len, joins); fribidi_join_arabic(shaper->ctypes, len, shaper->emblevels, joins); fribidi_shape(FRIBIDI_FLAGS_DEFAULT | FRIBIDI_FLAGS_ARABIC, shaper->emblevels, len, joins, shaper->event_text); // update indexes for (i = 0; i < len; i++) { GlyphInfo *info = glyphs + i; FT_Face face = info->font->faces[info->face_index]; info->symbol = shaper->event_text[i]; info->glyph_index = FT_Get_Char_Index(face, ass_font_index_magic(face, shaper->event_text[i])); } free(joins); } /** * \brief Toggle kerning for HarfBuzz shaping. 
* \param shaper shaper instance * \param kern toggle kerning */ void ass_shaper_set_kerning(ASS_Shaper *shaper, int kern) { #ifdef CONFIG_HARFBUZZ shaper->features[KERN].value = !!kern; #endif } /** * \brief Find shape runs according to the event's selected fonts */ void ass_shaper_find_runs(ASS_Shaper *shaper, ASS_Renderer *render_priv, GlyphInfo *glyphs, size_t len) { int i; int shape_run = 0; #ifdef CONFIG_HARFBUZZ ass_shaper_determine_script(shaper, glyphs, len); #endif // find appropriate fonts for the shape runs for (i = 0; i < len; i++) { GlyphInfo *last = glyphs + i - 1; GlyphInfo *info = glyphs + i; // skip drawings if (info->symbol == 0xfffc) continue; // set size and get glyph index ass_font_get_index(render_priv->fontselect, info->font, info->symbol, &info->face_index, &info->glyph_index); // shape runs break on: xbord, ybord, xshad, yshad, // all four colors, all four alphas, be, blur, fn, fs, // fscx, fscy, fsp, bold, italic, underline, strikeout, // frx, fry, frz, fax, fay, karaoke start, karaoke type, // and on every line break if (i > 0 && (last->font != info->font || last->face_index != info->face_index || last->script != info->script || last->font_size != info->font_size || last->c[0] != info->c[0] || last->c[1] != info->c[1] || last->c[2] != info->c[2] || last->c[3] != info->c[3] || last->be != info->be || last->blur != info->blur || last->shadow_x != info->shadow_x || last->shadow_y != info->shadow_y || last->frx != info->frx || last->fry != info->fry || last->frz != info->frz || last->fax != info->fax || last->fay != info->fay || last->scale_x != info->scale_x || last->scale_y != info->scale_y || last->border_style != info->border_style || last->border_x != info->border_x || last->border_y != info->border_y || last->hspacing != info->hspacing || last->italic != info->italic || last->bold != info->bold || last->flags != info->flags)) shape_run++; info->shape_run_id = shape_run; } } /** * \brief Set base direction (paragraph direction) of the text. * \param dir base direction */ void ass_shaper_set_base_direction(ASS_Shaper *shaper, FriBidiParType dir) { shaper->base_direction = dir; } /** * \brief Set language hint. Some languages have specific character variants, * like Serbian Cyrillic. * \param lang ISO 639-1 two-letter language code */ void ass_shaper_set_language(ASS_Shaper *shaper, const char *code) { #ifdef CONFIG_HARFBUZZ hb_language_t lang; if (code) lang = hb_language_from_string(code, -1); else lang = HB_LANGUAGE_INVALID; shaper->language = lang; #endif } /** * Set shaping level. Essentially switches between FriBidi and HarfBuzz. */ void ass_shaper_set_level(ASS_Shaper *shaper, ASS_ShapingLevel level) { shaper->shaping_level = level; } /** * \brief Remove all zero-width invisible characters from the text. * \param text_info text */ static void ass_shaper_skip_characters(TextInfo *text_info) { int i; GlyphInfo *glyphs = text_info->glyphs; for (i = 0; i < text_info->length; i++) { // Skip direction override control characters if ((glyphs[i].symbol <= 0x202e && glyphs[i].symbol >= 0x202a) || (glyphs[i].symbol <= 0x200f && glyphs[i].symbol >= 0x200b) || (glyphs[i].symbol <= 0x2063 && glyphs[i].symbol >= 0x2060) || glyphs[i].symbol == 0xfeff || glyphs[i].symbol == 0x00ad || glyphs[i].symbol == 0x034f) { glyphs[i].symbol = 0; glyphs[i].skip++; } } } /** * \brief Shape an event's text. Calculates directional runs and shapes them. 
* \param text_info event's text * \return success, when 0 */ int ass_shaper_shape(ASS_Shaper *shaper, TextInfo *text_info) { int i, ret, last_break; FriBidiParType dir; GlyphInfo *glyphs = text_info->glyphs; if (!check_allocations(shaper, text_info->length)) return -1; // Get bidi character types and embedding levels last_break = 0; for (i = 0; i < text_info->length; i++) { shaper->event_text[i] = glyphs[i].symbol; // embedding levels should be calculated paragraph by paragraph if (glyphs[i].symbol == '\n' || i == text_info->length - 1) { dir = shaper->base_direction; fribidi_get_bidi_types(shaper->event_text + last_break, i - last_break + 1, shaper->ctypes + last_break); ret = fribidi_get_par_embedding_levels(shaper->ctypes + last_break, i - last_break + 1, &dir, shaper->emblevels + last_break); if (ret == 0) return -1; last_break = i + 1; } } // add embedding levels to shape runs for final runs for (i = 0; i < text_info->length; i++) { glyphs[i].shape_run_id += shaper->emblevels[i]; } #ifdef CONFIG_HARFBUZZ switch (shaper->shaping_level) { case ASS_SHAPING_SIMPLE: shape_fribidi(shaper, glyphs, text_info->length); ass_shaper_skip_characters(text_info); break; case ASS_SHAPING_COMPLEX: shape_harfbuzz(shaper, glyphs, text_info->length); break; } #else shape_fribidi(shaper, glyphs, text_info->length); ass_shaper_skip_characters(text_info); #endif return 0; } /** * \brief Create a new shaper instance and preallocate data structures * \param prealloc preallocation size */ ASS_Shaper *ass_shaper_new(size_t prealloc) { ASS_Shaper *shaper = calloc(sizeof(*shaper), 1); if (!shaper) return NULL; shaper->base_direction = FRIBIDI_PAR_ON; if (!check_allocations(shaper, prealloc)) goto error; #ifdef CONFIG_HARFBUZZ if (!init_features(shaper)) goto error; shaper->metrics_cache = ass_glyph_metrics_cache_create(); if (!shaper->metrics_cache) goto error; #endif return shaper; error: ass_shaper_free(shaper); return NULL; } /** * \brief clean up additional data temporarily needed for shaping and * (e.g. additional glyphs allocated) */ void ass_shaper_cleanup(ASS_Shaper *shaper, TextInfo *text_info) { int i; for (i = 0; i < text_info->length; i++) { GlyphInfo *info = text_info->glyphs + i; info = info->next; while (info) { GlyphInfo *next = info->next; free(info); info = next; } } } /** * \brief Calculate reorder map to render glyphs in visual order * \param shaper shaper instance * \param text_info text to be reordered * \return map of reordered characters, or NULL */ FriBidiStrIndex *ass_shaper_reorder(ASS_Shaper *shaper, TextInfo *text_info) { int i, ret; // Initialize reorder map for (i = 0; i < text_info->length; i++) shaper->cmap[i] = i; // Create reorder map line-by-line for (i = 0; i < text_info->n_lines; i++) { LineInfo *line = text_info->lines + i; FriBidiParType dir = FRIBIDI_PAR_ON; ret = fribidi_reorder_line(0, shaper->ctypes + line->offset, line->len, 0, dir, shaper->emblevels + line->offset, NULL, shaper->cmap + line->offset); if (ret == 0) return NULL; } return shaper->cmap; } /** * \brief Resolve a Windows font charset number to a suitable base * direction. Generally, use LTR for compatibility with VSFilter. The * special value -1, which is not a legal Windows font charset number, * can be used for autodetection. * \param enc Windows font encoding */ FriBidiParType resolve_base_direction(int enc) { switch (enc) { case -1: return FRIBIDI_PAR_ON; default: return FRIBIDI_PAR_LTR; } }
./CrossVul/dataset_final_sorted/CWE-399/c/good_5343_0
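The shaper entry points in the libass file above form a small pipeline: configure the shaper, find shape runs, shape, then (after line breaking) reorder and clean up. The sketch below is written only against the declarations visible in that file; the include names, the render_priv/text_info plumbing, the hard-coded shaping choices, and the idea that this mirrors ass_render.c are simplifying assumptions, so read it as an illustration of call order rather than the renderer's actual code.

// Hedged sketch of one plausible sequence for driving the shaper.
#include "ass_render.h"
#include "ass_shaper.h"

static int shape_event_sketch(ASS_Renderer *render_priv, ASS_Shaper *shaper,
                              TextInfo *text_info, int enc)
{
    // paragraph direction derived from the event's font encoding
    // (-1 requests autodetection, see resolve_base_direction() above)
    ass_shaper_set_base_direction(shaper, resolve_base_direction(enc));
    ass_shaper_set_level(shaper, ASS_SHAPING_COMPLEX);
    ass_shaper_set_kerning(shaper, 1);
    ass_shaper_set_language(shaper, NULL);   // no language override

    // split the event into shape runs, then shape the glyph array in place
    ass_shaper_find_runs(shaper, render_priv, text_info->glyphs,
                         text_info->length);
    if (ass_shaper_shape(shaper, text_info) < 0)
        return -1;

    // once lines have been broken, the caller would fetch the visual-order
    // map with ass_shaper_reorder() and release the extra per-cluster
    // glyphs with ass_shaper_cleanup()
    return 0;
}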
crossvul-cpp_data_bad_3763_0
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/wanrouter.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. 
If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. */ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); } /** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0 || len > sizeof(struct sockaddr_storage)) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." * 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); } static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. */ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) { struct qstr name = { .name = "" }; struct path path; struct file *file; int fd; fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) return fd; path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); if (unlikely(!path.dentry)) { put_unused_fd(fd); return -ENOMEM; } path.mnt = mntget(sock_mnt); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (unlikely(!file)) { /* drop dentry, keep inode */ ihold(path.dentry->d_inode); path_put(&path); put_unused_fd(fd); return -ENFILE; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; *f = file; return fd; } int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = sock_alloc_file(sock, &newfile, flags); if (likely(fd >= 0)) fd_install(fd, newfile); return fd; } EXPORT_SYMBOL(sock_map_fd); struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; } EXPORT_SYMBOL(sock_from_file); /** * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * * The file handle passed in is locked and the socket it is bound * too is returned. If an error occurs the err pointer is overwritten * with a negative errno code and NULL is returned. The function checks * for both invalid handles and passing a handle which is not a socket. * * On a success the socket object pointer is returned. 
*/ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); this_cpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. */ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, .llseek = noop_llseek, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. */ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags)) return; this_cpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } EXPORT_SYMBOL(sock_release); int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; if (sock_flag(sk, SOCK_WIFI_STATUS)) *tx_flags |= SKBTX_WIFI_STATUS; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; return sock->ops->sendmsg(iocb, sock, msg, size); } static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_sendmsg); static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int 
ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_sendmsg); static int ktime2ts(ktime_t kt, struct timespec *ts) { if (kt.tv64) { *ts = ktime_to_timespec(kt); return 1; } else { return 0; } } /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (skb->tstamp.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) { skb_get_timestampns(skb, ts + 0); empty = 0; } if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime2ts(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime2ts(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int ack; if (!sock_flag(sk, SOCK_WIFI_STATUS)) return; if (!skb->wifi_acked_valid) return; ack = skb->wifi_acked; put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); } EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(__u32), &skb->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { sock_recv_timestamp(msg, sk, skb); sock_recv_drops(msg, sk, skb); } EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; si->flags = flags; return sock->ops->recvmsg(iocb, sock, msg, size, flags); } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); } int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; 
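	/*
	 * Drive the request through a synchronous kiocb: if the protocol
	 * queues the operation and returns -EIOCBQUEUED, wait for it to
	 * complete so the caller always sees a final result.
	 */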
init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_recvmsg); static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } /** * kernel_recvmsg - Receive a message from a socket (kernel space) * @sock: The socket to receive the message from * @msg: Received message * @vec: Input s/g array for message data * @num: Size of input s/g array * @size: Number of bytes to read * @flags: Message flags (MSG_DONTWAIT, etc...) * * On return the msg structure contains the scatter/gather array passed in the * vec argument. The array is modified so that it consists of the unfilled * portion of the original array. * * The returned value is the total number of bytes received, or an error. */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_recvmsg); static void sock_aio_dtor(struct kiocb *iocb) { kfree(iocb->private); } static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; sock_update_classid(sock->sk); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) { siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); if (!siocb) return NULL; iocb->ki_dtor = sock_aio_dtor; } siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; if (iocb->ki_left == 0) /* Match SYS5 behaviour */ return 0; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; return __sock_sendmsg(iocb, sock, msg, size); } static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } /* * Atomic setting of ioctl hooks to avoid race * with module unload. */ static DEFINE_MUTEX(br_ioctl_mutex); static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { mutex_lock(&br_ioctl_mutex); br_ioctl_hook = hook; mutex_unlock(&br_ioctl_mutex); } EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); static int (*vlan_ioctl_hook) (struct net *, void __user *arg); void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); } EXPORT_SYMBOL(vlan_ioctl_set); static DEFINE_MUTEX(dlci_ioctl_mutex); static int (*dlci_ioctl_hook) (unsigned int, void __user *); void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) { mutex_lock(&dlci_ioctl_mutex); dlci_ioctl_hook = hook; mutex_unlock(&dlci_ioctl_mutex); } EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, unsigned int cmd, unsigned long arg) { int err; void __user *argp = (void __user *)arg; err = sock->ops->ioctl(sock, cmd, arg); /* * If this ioctl is unknown try to hand it down * to the NIC driver. */ if (err == -ENOIOCTLCMD) err = dev_ioctl(net, cmd, argp); return err; } /* * With an ioctl, arg may well be a user mode pointer, but we don't know * what to do with it - that's up to the protocol still. 
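 *
 * Anything not recognised below is handed to sock_do_ioctl(), which
 * offers the command to the protocol's ->ioctl() handler and, if that
 * returns -ENOIOCTLCMD, falls back to dev_ioctl().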
*/ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; struct net *net; sock = file->private_data; sk = sock->sk; net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WEXT_CORE if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { err = dev_ioctl(net, cmd, argp); } else #endif switch (cmd) { case FIOSETOWN: case SIOCSPGRP: err = -EFAULT; if (get_user(pid, (int __user *)argp)) break; err = f_setown(sock->file, pid, 1); break; case FIOGETOWN: case SIOCGPGRP: err = put_user(f_getown(sock->file), (int __user *)argp); break; case SIOCGIFBR: case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: err = -ENOPKG; if (!br_ioctl_hook) request_module("bridge"); mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case SIOCGIFVLAN: case SIOCSIFVLAN: err = -ENOPKG; if (!vlan_ioctl_hook) request_module("8021q"); mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: case SIOCDELDLCI: err = -ENOPKG; if (!dlci_ioctl_hook) request_module("dlci"); mutex_lock(&dlci_ioctl_mutex); if (dlci_ioctl_hook) err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; } int sock_create_lite(int family, int type, int protocol, struct socket **res) { int err; struct socket *sock = NULL; err = security_socket_create(family, type, protocol, 1); if (err) goto out; sock = sock_alloc(); if (!sock) { err = -ENOMEM; goto out; } sock->type = type; err = security_socket_post_create(sock, family, type, protocol, 1); if (err) goto out_release; out: *res = sock; return err; out_release: sock_release(sock); sock = NULL; goto out; } EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; return sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) { struct socket *sock = file->private_data; return sock->ops->mmap(file, sock, vma); } static int sock_close(struct inode *inode, struct file *filp) { /* * It was possible the inode is NULL we were * closing an unfinished socket. */ if (!inode) { printk(KERN_DEBUG "sock_close: NULL inode\n"); return 0; } sock_release(SOCKET_I(inode)); return 0; } /* * Update the socket async list * * Fasync_list locking strategy. * * 1. fasync_list is modified only under process context socket lock * i.e. under semaphore. * 2. 
fasync_list is used under read_lock(&sk->sk_callback_lock) * or under socket lock */ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; struct socket_wq *wq; if (sk == NULL) return -EINVAL; lock_sock(sk); wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) sock_reset_flag(sk, SOCK_FASYNC); else sock_set_flag(sk, SOCK_FASYNC); release_sock(sk); return 0; } /* This function may be called only under socket lock or callback_lock or rcu_lock */ int sock_wake_async(struct socket *sock, int how, int band) { struct socket_wq *wq; if (!sock) return -1; rcu_read_lock(); wq = rcu_dereference(sock->wq); if (!wq || !wq->fasync_list) { rcu_read_unlock(); return -1; } switch (how) { case SOCK_WAKE_WAITD: if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) break; /* fall through */ case SOCK_WAKE_IO: call_kill: kill_fasync(&wq->fasync_list, SIGIO, band); break; case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, SIGURG, band); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(sock_wake_async); int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { net_warn_ratelimited("socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. */ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. 
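 * (pf->create() has installed sock->ops, which may belong to a different
 * module than the address family itself, for instance a protocol stacked
 * on an existing family, so a second reference is taken here and dropped
 * again in sock_release().)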
*/ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; } EXPORT_SYMBOL(__sock_create); int sock_create(int family, int type, int protocol, struct socket **res) { return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } EXPORT_SYMBOL(sock_create); int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); } EXPORT_SYMBOL(sock_create_kern); SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) { int retval; struct socket *sock; int flags; /* Check the SOCK_* constants for consistency. */ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK); flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; retval = sock_create(family, type, protocol, &sock); if (retval < 0) goto out; retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); if (retval < 0) goto out_release; out: /* It may be already another descriptor 8) Not kernel problem. */ return retval; out_release: sock_release(sock); return retval; } /* * Create a pair of connected sockets. */ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec) { struct socket *sock1, *sock2; int fd1, fd2, err; struct file *newfile1, *newfile2; int flags; flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; /* * Obtain the first socket and check if the underlying protocol * supports the socketpair call. */ err = sock_create(family, type, protocol, &sock1); if (err < 0) goto out; err = sock_create(family, type, protocol, &sock2); if (err < 0) goto out_release_1; err = sock1->ops->socketpair(sock1, sock2); if (err < 0) goto out_release_both; fd1 = sock_alloc_file(sock1, &newfile1, flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } fd2 = sock_alloc_file(sock2, &newfile2, flags); if (unlikely(fd2 < 0)) { err = fd2; fput(newfile1); put_unused_fd(fd1); sock_release(sock2); goto out; } audit_fd_pair(fd1, fd2); fd_install(fd1, newfile1); fd_install(fd2, newfile2); /* fd1 and fd2 may be already another descriptors. * Not kernel problem. */ err = put_user(fd1, &usockvec[0]); if (!err) err = put_user(fd2, &usockvec[1]); if (!err) return 0; sys_close(fd2); sys_close(fd1); return err; out_release_both: sock_release(sock2); out_release_1: sock_release(sock1); out: return err; } /* * Bind a name to a socket. Nothing much to do here since it's * the protocol's responsibility to handle the local address. * * We move the socket address to kernel space before we call * the protocol layer (having also checked the address is ok). 
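 *
 * The pattern below recurs in most of the socket syscalls: resolve the
 * descriptor with sockfd_lookup_light(), copy and audit the user sockaddr
 * via move_addr_to_kernel(), consult the LSM hook, then call the
 * protocol's own handler (here ->bind()).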
*/ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, &address); if (err >= 0) { err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); if (!err) err = sock->ops->bind(sock, (struct sockaddr *) &address, addrlen); } fput_light(sock->file, fput_needed); } return err; } /* * Perform a listen. Basically, we allow the protocol to do anything * necessary for a listen, and if that works, we mark the socket as * ready for listening. */ SYSCALL_DEFINE2(listen, int, fd, int, backlog) { struct socket *sock; int err, fput_needed; int somaxconn; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); if (!err) err = sock->ops->listen(sock, backlog); fput_light(sock->file, fput_needed); } return err; } /* * For accept, we attempt to create a new socket, set up the link * with the client, wake up the client, then return the new * connected fd. We collect the address of the connector in kernel * space and move it to user at the very end. This is unclean because * we open the socket then return an error. * * 1003.1g adds the ability to recvmsg() to query connection pending * status to recvmsg. We need to add that support in a way thats * clean when we restucture accept also. */ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags) { struct socket *sock, *newsock; struct file *newfile; int err, len, newfd, fput_needed; struct sockaddr_storage address; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = -ENFILE; newsock = sock_alloc(); if (!newsock) goto out_put; newsock->type = sock->type; newsock->ops = sock->ops; /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. */ __module_get(newsock->ops->owner); newfd = sock_alloc_file(newsock, &newfile, flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } err = security_socket_accept(sock, newsock); if (err) goto out_fd; err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_fd; if (upeer_sockaddr) { if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) { err = -ECONNABORTED; goto out_fd; } err = move_addr_to_user(&address, len, upeer_sockaddr, upeer_addrlen); if (err < 0) goto out_fd; } /* File flags are not inherited via accept() unlike another OSes. */ fd_install(newfd, newfile); err = newfd; out_put: fput_light(sock->file, fput_needed); out: return err; out_fd: fput(newfile); put_unused_fd(newfd); goto out_put; } SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen) { return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); } /* * Attempt to connect to a socket with the server address. The address * is in user space so we verify it is OK and move it to kernel space. 
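 *
 * For a non-blocking socket the protocol may return -EINPROGRESS here;
 * userspace is then expected to wait for writability and read SO_ERROR
 * to learn the final outcome.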
* * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to * break bindings * * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and * other SEQPACKET protocols that take time to connect() as it doesn't * include the -EINPROGRESS status for such sockets. */ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = move_addr_to_kernel(uservaddr, addrlen, &address); if (err < 0) goto out_put; err = security_socket_connect(sock, (struct sockaddr *)&address, addrlen); if (err) goto out_put; err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the local address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = security_socket_getsockname(sock); if (err) goto out_put; err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); if (err) goto out_put; err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the remote address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getpeername(sock); if (err) { fput_light(sock->file, fput_needed); return err; } err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1); if (!err) err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); fput_light(sock->file, fput_needed); } return err; } /* * Send a datagram to a given address. We move the address into kernel * space and check the user space data area is readable before invoking * the protocol. */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, &address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Send a datagram down a socket. */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } /* * Receive a frame from the socket and optionally record the address of the * sender. 
We verify the buffers are writable and if needed move the * sender address from kernel to user space. */ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; } /* * Receive a datagram from a socket. */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, unsigned int flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, char __user *, optval, int, optlen) { int err, fput_needed; struct socket *sock; if (optlen < 0) return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_setsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, optlen); else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, char __user *, optval, int __user *, optlen) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Shutdown a socket. */ SYSCALL_DEFINE2(shutdown, int, fd, int, how) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_shutdown(sock, how); if (!err) err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } return err; } /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. */ #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) struct used_address { struct sockaddr_storage name; unsigned int name_len; }; static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct sockaddr_storage address; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; int err, ctl_len, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* This will also move the address data into kernel space */ if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ); } else err = verify_iovec(msg_sys, iov, &address, VERIFY_READ); if (err < 0) goto out_freeiov; total_len = err; err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) goto out_freeiov; ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { err = cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) goto out_freeiov; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) goto out_freeiov; } err = -EFAULT; /* * Careful! Before this, msg_sys->msg_control contains a user pointer. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted * checking falls down on this. */ if (copy_from_user(ctl_buf, (void __user __force *)msg_sys->msg_control, ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; } msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) msg_sys->msg_flags |= MSG_DONTWAIT; /* * If this is sendmmsg() and current destination address is same as * previously succeeded address, omit asking LSM's decision. * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ if (used_address && msg_sys->msg_name && used_address->name_len == msg_sys->msg_namelen && !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; } err = sock_sendmsg(sock, msg_sys, total_len); /* * If this is sendmmsg() and sending to current destination address was * successful, remember it. 
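 * Caching it is safe because the successful sock_sendmsg() above has
 * already passed the LSM check for exactly this destination.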
*/ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; if (msg_sys->msg_name) memcpy(&used_address->name, msg_sys->msg_name, used_address->name_len); } out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD sendmsg interface */ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } /* * Linux sendmmsg interface */ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct used_address used_address; if (vlen > UIO_MAXIOV) vlen = UIO_MAXIOV; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; used_address.name_len = UINT_MAX; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; err = 0; while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_sendmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; } fput_light(sock->file, fput_needed); /* We only return an error if no datagrams were able to be sent */ if (datagrams != 0) return datagrams; return err; } SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user(&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD recvmsg interface */ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
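 * (err is negative at this point, so storing -err keeps the conventional
 * positive errno value in sk_err.)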
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[6]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. */ if (copy_from_user(a, args, len)) return -EFAULT; audit_socketcall(nargs[call] / sizeof(unsigned long), a); a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its 
address family, and have it linked into the * socket interface. The value ops->family corresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from * new socket creation. * * If the protocol handler is a module, then it can use module reference * counts to protect against new references. If the protocol handler is not * a module then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize the network sysctl infrastructure. */ err = net_sysctl_init(); if (err) goto out; /* * Initialize sock SLAB cache. */ sk_init(); /* * Initialize skbuff SLAB cache. */ skb_init(); /* * Initialize the protocols module. */ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER netfilter_init(); #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way.
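(sockets_in_use is a per-cpu counter and a socket may be allocated on one CPU but released on another, so an individual CPU's count can legitimately dip below zero.)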
8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(up, &ktv); return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(up, &kts); return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. */ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. 
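 * Its 32-bit compat layout therefore differs from the native one, so the
 * affected commands are translated field by field below: convert_in copies
 * the compat structure into a native one before the ioctl, convert_out
 * copies the results back to the compat caller afterwards.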
*/ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. */ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -ENOIOCTLCMD; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. 
*/ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= __get_user(r4.rt_window, &(ur4->rt_window)); ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= __get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: 
case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 0); } EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int __user *uoptlen; int err; uoptval = (char __user __force *) optval; uoptlen = (int __user __force *) optlen; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); else err = sock->ops->getsockopt(sock, level, optname, uoptval, uoptlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int err; uoptval = (char __user __force *) optval; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, uoptval, optlen); else err = sock->ops->setsockopt(sock, 
level, optname, uoptval, optlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { sock_update_classid(sock->sk); if (sock->ops->sendpage) return sock->ops->sendpage(sock, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); int err; set_fs(KERNEL_DS); err = sock->ops->ioctl(sock, cmd, arg); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown);
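/*
 * Illustrative sketch (not part of the original file): one way an in-kernel
 * user might drive the kernel_* socket helpers defined above to accept a
 * TCP connection.  The port number and the use of init_net are assumptions
 * made purely for this example, sock_create_kern()'s exact signature varies
 * across kernel versions, the usual <linux/net.h> and <linux/in.h> includes
 * are assumed, and error unwinding (sock_release()) is abbreviated.  With
 * flags == 0, kernel_accept() blocks until a peer connects.
 *
 *	struct socket *srv, *peer;
 *	struct sockaddr_in sin = {
 *		.sin_family      = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port        = htons(12345),
 *	};
 *	int err;
 *
 *	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
 *			       IPPROTO_TCP, &srv);
 *	if (err)
 *		return err;
 *	err = kernel_bind(srv, (struct sockaddr *)&sin, sizeof(sin));
 *	if (!err)
 *		err = kernel_listen(srv, 5);
 *	if (!err)
 *		err = kernel_accept(srv, &peer, 0);
 */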
/* * sysctl.c: General linux system control interface * * Begun 24 March 1995, Stephen Tweedie * Added /proc support, Dec 1995 * Added bdflush entry and intvec min/max checking, 2/23/96, Tom Dyas. * Added hooks for /proc/sys/net (minor, minor patch), 96/4/1, Mike Shaver. * Added kernel/java-{interpreter,appletviewer}, 96/5/10, Mike Shaver. * Dynamic registration fixes, Stephen Tweedie. * Added kswapd-interval, ctrl-alt-del, printk stuff, 1/8/97, Chris Horn. * Made sysctl support optional via CONFIG_SYSCTL, 1/10/97, Chris * Horn. * Added proc_doulongvec_ms_jiffies_minmax, 09/08/99, Carlos H. Bauer. * Added proc_doulongvec_minmax, 09/08/99, Carlos H. Bauer. * Changed linked lists to use list.h instead of lists.h, 02/24/00, Bill * Wendling. * The list_for_each() macro wasn't appropriate for the sysctl loop. * Removed it and replaced it with older style, 03/23/00, Bill Wendling */ #include <linux/module.h> #include <linux/aio.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/bitmap.h> #include <linux/signal.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/ctype.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kobject.h> #include <linux/net.h> #include <linux/sysrq.h> #include <linux/highuid.h> #include <linux/writeback.h> #include <linux/ratelimit.h> #include <linux/compaction.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/key.h> #include <linux/times.h> #include <linux/limits.h> #include <linux/dcache.h> #include <linux/dnotify.h> #include <linux/syscalls.h> #include <linux/vmstat.h> #include <linux/nfs_fs.h> #include <linux/acpi.h> #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/kmod.h> #include <linux/capability.h> #include <linux/binfmts.h> #include <linux/sched/sysctl.h> #include <linux/kexec.h> #include <linux/bpf.h> #include <asm/uaccess.h> #include <asm/processor.h> #ifdef CONFIG_X86 #include <asm/nmi.h> #include <asm/stacktrace.h> #include <asm/io.h> #endif #ifdef CONFIG_SPARC #include <asm/setup.h> #endif #ifdef CONFIG_BSD_PROCESS_ACCT #include <linux/acct.h> #endif #ifdef CONFIG_RT_MUTEXES #include <linux/rtmutex.h> #endif #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include <linux/lockdep.h> #endif #ifdef CONFIG_CHR_DEV_SG #include <scsi/sg.h> #endif #ifdef CONFIG_LOCKUP_DETECTOR #include <linux/nmi.h> #endif #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ extern int suid_dumpable; #ifdef CONFIG_COREDUMP extern int core_uses_pid; extern char core_pattern[]; extern unsigned int core_pipe_limit; #endif extern int pid_max; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; extern int compat_log; extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; #ifndef CONFIG_MMU extern int sysctl_nr_trim_pages; #endif /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; #endif static int __maybe_unused neg_one = -1; static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused four = 4; static unsigned long one_ul = 1; static int one_hundred = 100; #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ #ifdef CONFIG_DETECT_HUNG_TASK static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); #endif #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif #ifdef CONFIG_SPARC #endif #ifdef __hppa__ extern int pwrsw_enabled; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW extern int unaligned_enabled; #endif #ifdef CONFIG_IA64 extern int unaligned_dump_stack; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN extern int no_unaligned_warning; #endif #ifdef CONFIG_PROC_SYSCTL #define SYSCTL_WRITES_LEGACY -1 #define SYSCTL_WRITES_WARN 0 #define SYSCTL_WRITES_STRICT 1 static int sysctl_writes_strict = SYSCTL_WRITES_WARN; static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses it's own private copy */ static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static int sysrq_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error; error = proc_dointvec(table, write, buffer, lenp, ppos); if (error) return error; if (write) sysrq_toggle_support(__sysrq_enabled); return 0; } #endif static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; static struct ctl_table debug_table[]; static struct ctl_table dev_table[]; extern struct ctl_table random_table[]; #ifdef CONFIG_EPOLL extern struct ctl_table epoll_table[]; #endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; #endif /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { { .procname = "kernel", .mode = 0555, .child = kern_table, }, { .procname = "vm", .mode = 0555, .child = vm_table, }, { .procname = "fs", .mode = 0555, .child = fs_table, }, { .procname 
= "debug", .mode = 0555, .child = debug_table, }, { .procname = "dev", .mode = 0555, .child = dev_table, }, { } }; #ifdef CONFIG_SCHED_DEBUG static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ #ifdef CONFIG_SMP static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif /* CONFIG_SMP */ #endif /* CONFIG_SCHED_DEBUG */ #ifdef CONFIG_COMPACTION static int min_extfrag_threshold; static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", .data = &sysctl_sched_min_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_latency_ns", .data = &sysctl_sched_latency, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_wakeup_granularity_ns", .data = &sysctl_sched_wakeup_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", .data = &sysctl_sched_tunable_scaling, .maxlen = sizeof(enum sched_tunable_scaling), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_tunable_scaling, .extra2 = &max_sched_tunable_scaling, }, { .procname = "sched_migration_cost_ns", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_time_avg_ms", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_shares_window_ns", .data = &sysctl_sched_shares_window, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif /* CONFIG_SMP */ #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing_scan_delay_ms", .data = &sysctl_numa_balancing_scan_delay, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_min_ms", .data = &sysctl_numa_balancing_scan_period_min, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_max_ms", .data = &sysctl_numa_balancing_scan_period_max, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_size_mb", .data = &sysctl_numa_balancing_scan_size, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, { .procname = "numa_balancing", .data = NULL, /* filled in by handler */ .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sysctl_numa_balancing, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_NUMA_BALANCING */ 
#endif /* CONFIG_SCHED_DEBUG */ { .procname = "sched_rt_period_us", .data = &sysctl_sched_rt_period, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rt_runtime_us", .data = &sysctl_sched_rt_runtime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rr_timeslice_ms", .data = &sched_rr_timeslice, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rr_handler, }, #ifdef CONFIG_SCHED_AUTOGROUP { .procname = "sched_autogroup_enabled", .data = &sysctl_sched_autogroup_enabled, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", .data = &sysctl_sched_cfs_bandwidth_slice, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, #endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", .data = &prove_locking, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_LOCK_STAT { .procname = "lock_stat", .data = &lock_stat, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic", .data = &panic_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_COREDUMP { .procname = "core_uses_pid", .data = &core_uses_pid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "core_pattern", .data = core_pattern, .maxlen = CORENAME_MAX_SIZE, .mode = 0644, .proc_handler = proc_dostring_coredump, }, { .procname = "core_pipe_limit", .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_taint, }, { .procname = "sysctl_writes_strict", .data = &sysctl_writes_strict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, .extra2 = &one, }, #endif #ifdef CONFIG_LATENCYTOP { .procname = "latencytop", .data = &latencytop_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLK_DEV_INITRD { .procname = "real-root-dev", .data = &real_root_dev, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "print-fatal-signals", .data = &print_fatal_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SPARC { .procname = "reboot-cmd", .data = reboot_command, .maxlen = 256, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "stop-a", .data = &stop_a_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "scons-poweroff", .data = &scons_pwroff, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SPARC64 { .procname = "tsb-ratio", .data = &sysctl_tsb_ratio, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef __hppa__ { .procname = "soft-power", .data = &pwrsw_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW { .procname = "unaligned-trap", .data = &unaligned_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "ctrl-alt-del", .data = &C_A_D, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, 
}, #ifdef CONFIG_FUNCTION_TRACER { .procname = "ftrace_enabled", .data = &ftrace_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = ftrace_enable_sysctl, }, #endif #ifdef CONFIG_STACK_TRACER { .procname = "stack_tracer_enabled", .data = &stack_tracer_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = stack_trace_sysctl, }, #endif #ifdef CONFIG_TRACING { .procname = "ftrace_dump_on_oops", .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "traceoff_on_warning", .data = &__disable_trace_on_warning, .maxlen = sizeof(__disable_trace_on_warning), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "tracepoint_printk", .data = &tracepoint_printk, .maxlen = sizeof(tracepoint_printk), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_KEXEC_CORE { .procname = "kexec_load_disabled", .data = &kexec_load_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_MODULES { .procname = "modprobe", .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "modules_disabled", .data = &modules_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif #ifdef CONFIG_UEVENT_HELPER { .procname = "hotplug", .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #endif #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BSD_PROCESS_ACCT { .procname = "acct", .data = &acct_parm, .maxlen = 3*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MAGIC_SYSRQ { .procname = "sysrq", .data = &__sysrq_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = sysrq_sysctl_handler, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .data = NULL, .maxlen = sizeof (int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif { .procname = "threads-max", .data = NULL, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_max_threads, }, { .procname = "random", .mode = 0555, .child = random_table, }, { .procname = "usermodehelper", .mode = 0555, .child = usermodehelper_table, }, { .procname = "overflowuid", .data = &overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU { .procname = "ieee_emulation_warnings", .data = &sysctl_ieee_emulation_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "userprocess_debug", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "pid_max", .data = &pid_max, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, { .procname = "panic_on_oops", .data = &panic_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #if defined CONFIG_PRINTK { .procname = 
"printk", .data = &console_loglevel, .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_ratelimit", .data = &printk_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "printk_ratelimit_burst", .data = &printk_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_delay", .data = &printk_delay_msec, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &ten_thousand, }, { .procname = "dmesg_restrict", .data = &dmesg_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &one, }, { .procname = "kptr_restrict", .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &two, }, #endif { .procname = "ngroups_max", .data = &ngroups_max, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "cap_last_cap", .data = (void *)&cap_last_cap, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, #if defined(CONFIG_LOCKUP_DETECTOR) { .procname = "watchdog", .data = &watchdog_user_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_thresh", .data = &watchdog_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_watchdog_thresh, .extra1 = &zero, .extra2 = &sixty, }, { .procname = "nmi_watchdog", .data = &nmi_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_nmi_watchdog, .extra1 = &zero, #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) .extra2 = &one, #else .extra2 = &zero, #endif }, { .procname = "soft_watchdog", .data = &soft_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_soft_watchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_cpumask", .data = &watchdog_cpumask_bits, .maxlen = NR_CPUS, .mode = 0644, .proc_handler = proc_watchdog_cpumask, }, { .procname = "softlockup_panic", .data = &softlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #ifdef CONFIG_HARDLOCKUP_DETECTOR { .procname = "hardlockup_panic", .data = &hardlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_SMP { .procname = "softlockup_all_cpu_backtrace", .data = &sysctl_softlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hardlockup_all_cpu_backtrace", .data = &sysctl_hardlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_SMP */ #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_X86) { .procname = "panic_on_unrecovered_nmi", .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "panic_on_io_nmi", .data = &panic_on_io_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_DEBUG_STACKOVERFLOW { 
.procname = "panic_on_stackoverflow", .data = &sysctl_panic_on_stackoverflow, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "bootloader_type", .data = &bootloader_type, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bootloader_version", .data = &bootloader_version, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "kstack_depth_to_print", .data = &kstack_depth_to_print, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "io_delay_type", .data = &io_delay_type, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_MMU) { .procname = "randomize_va_space", .data = &randomize_va_space, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .procname = "spin_retry", .data = &spin_retry, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) { .procname = "acpi_video_flags", .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN { .procname = "ignore-unaligned-usertrap", .data = &no_unaligned_warning, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_IA64 { .procname = "unaligned-dump-stack", .data = &unaligned_dump_stack, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DETECT_HUNG_TASK { .procname = "hung_task_panic", .data = &sysctl_hung_task_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hung_task_check_count", .data = &sysctl_hung_task_check_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "hung_task_timeout_secs", .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_dohung_task_timeout_secs, .extra2 = &hung_task_timeout_max, }, { .procname = "hung_task_warnings", .data = &sysctl_hung_task_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &neg_one, }, #endif #ifdef CONFIG_COMPAT { .procname = "compat-log", .data = &compat_log, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_RT_MUTEXES { .procname = "max_lock_depth", .data = &max_lock_depth, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "poweroff_cmd", .data = &poweroff_cmd, .maxlen = POWEROFF_CMD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #ifdef CONFIG_KEYS { .procname = "keys", .mode = 0555, .child = key_sysctls, }, #endif #ifdef CONFIG_PERF_EVENTS /* * User-space scripts rely on the existence of this file * as a feature check for perf_events being enabled. * * So it's an ABI, do not remove! 
*/ { .procname = "perf_event_paranoid", .data = &sysctl_perf_event_paranoid, .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_mlock_kb", .data = &sysctl_perf_event_mlock, .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_max_sample_rate", .data = &sysctl_perf_event_sample_rate, .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, .proc_handler = perf_proc_update_handler, .extra1 = &one, }, { .procname = "perf_cpu_time_max_percent", .data = &sysctl_perf_cpu_time_max_percent, .maxlen = sizeof(sysctl_perf_cpu_time_max_percent), .mode = 0644, .proc_handler = perf_cpu_time_max_percent_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_KMEMCHECK { .procname = "kmemcheck", .data = &kmemcheck_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic_on_warn", .data = &panic_on_warn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) { .procname = "timer_migration", .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = timer_migration_handler, }, #endif #ifdef CONFIG_BPF_SYSCALL { .procname = "unprivileged_bpf_disabled", .data = &sysctl_unprivileged_bpf_disabled, .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif { } }; static struct ctl_table vm_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = overcommit_ratio_handler, }, { .procname = "overcommit_kbytes", .data = &sysctl_overcommit_kbytes, .maxlen = sizeof(sysctl_overcommit_kbytes), .mode = 0644, .proc_handler = overcommit_kbytes_handler, }, { .procname = "page-cluster", .data = &page_cluster, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = &one_ul, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_bytes", .data = 
&vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = &dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirtytime_expire_seconds", .data = &dirtytime_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = dirtytime_interval_handler, .extra1 = &zero, }, { .procname = "nr_pdflush_threads", .mode = 0444 /* read-only */, .proc_handler = pdflush_proc_obsolete, }, { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_sysctl_handler, }, #ifdef CONFIG_NUMA { .procname = "nr_hugepages_mempolicy", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, }, #endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "hugepages_treat_as_movable", .data = &hugepages_treat_as_movable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_overcommit_handler, }, #endif { .procname = "lowmem_reserve_ratio", .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, { .procname = "drop_caches", .data = &sysctl_drop_caches, .maxlen = sizeof(int), .mode = 0644, .proc_handler = drop_caches_sysctl_handler, .extra1 = &one, .extra2 = &four, }, #ifdef CONFIG_COMPACTION { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_extfrag_handler, .extra1 = &min_extfrag_threshold, .extra2 = &max_extfrag_threshold, }, { .procname = "compact_unevictable_allowed", .data = &sysctl_compact_unevictable_allowed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_COMPACTION */ { .procname = "min_free_kbytes", .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, .proc_handler = percpu_pagelist_fraction_sysctl_handler, .extra1 = &zero, }, #ifdef CONFIG_MMU { .procname = "max_map_count", .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #else { .procname = "nr_trim_pages", .data = &sysctl_nr_trim_pages, .maxlen = sizeof(sysctl_nr_trim_pages), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #endif { .procname = 
"laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "block_dump", .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "vfs_cache_pressure", .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT { .procname = "legacy_va_layout", .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_NUMA { .procname = "zone_reclaim_mode", .data = &zone_reclaim_mode, .maxlen = sizeof(zone_reclaim_mode), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "min_unmapped_ratio", .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "min_slab_ratio", .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_SMP { .procname = "stat_interval", .data = &sysctl_stat_interval, .maxlen = sizeof(sysctl_stat_interval), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif #ifdef CONFIG_MMU { .procname = "mmap_min_addr", .data = &dac_mmap_min_addr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = mmap_min_addr_handler, }, #endif #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, .proc_handler = numa_zonelist_order_handler, }, #endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { .procname = "vdso_enabled", #ifdef CONFIG_X86_32 .data = &vdso32_enabled, .maxlen = sizeof(vdso32_enabled), #else .data = &vdso_enabled, .maxlen = sizeof(vdso_enabled), #endif .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "memory_failure_recovery", .data = &sysctl_memory_failure_recovery, .maxlen = sizeof(sysctl_memory_failure_recovery), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS { .procname = "mmap_rnd_bits", .data = &mmap_rnd_bits, .maxlen = sizeof(mmap_rnd_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_bits_min, .extra2 = (void *)&mmap_rnd_bits_max, }, #endif 
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS { .procname = "mmap_rnd_compat_bits", .data = &mmap_rnd_compat_bits, .maxlen = sizeof(mmap_rnd_compat_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_compat_bits_min, .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif { } }; static struct ctl_table fs_table[] = { { .procname = "inode-nr", .data = &inodes_stat, .maxlen = 2*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(long), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, { .procname = "dentry-state", .data = &dentry_stat, .maxlen = 6*sizeof(long), .mode = 0444, .proc_handler = proc_nr_dentry, }, { .procname = "overflowuid", .data = &fs_overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &fs_overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_FILE_LOCKING { .procname = "leases-enable", .data = &leases_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DNOTIFY { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MMU #ifdef CONFIG_FILE_LOCKING { .procname = "lease-break-time", .data = &lease_break_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_AIO { .procname = "aio-nr", .data = &aio_nr, .maxlen = sizeof(aio_nr), .mode = 0444, .proc_handler = proc_doulongvec_minmax, }, { .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif /* CONFIG_AIO */ #ifdef CONFIG_INOTIFY_USER { .procname = "inotify", .mode = 0555, .child = inotify_table, }, #endif #ifdef CONFIG_EPOLL { .procname = "epoll", .mode = 0555, .child = epoll_table, }, #endif #endif { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "protected_hardlinks", .data = &sysctl_protected_hardlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_coredump, .extra1 = &zero, .extra2 = &two, }, #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) { .procname = "binfmt_misc", .mode = 0555, .child = sysctl_mount_point, }, #endif { .procname = "pipe-max-size", .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, { .procname = "pipe-user-pages-hard", .data = &pipe_user_pages_hard, .maxlen = sizeof(pipe_user_pages_hard), .mode = 0644, .proc_handler = 
proc_doulongvec_minmax, }, { .procname = "pipe-user-pages-soft", .data = &pipe_user_pages_soft, .maxlen = sizeof(pipe_user_pages_soft), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { } }; static struct ctl_table debug_table[] = { #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE { .procname = "exception-trace", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #if defined(CONFIG_OPTPROBES) { .procname = "kprobes-optimization", .data = &sysctl_kprobes_optimization, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_kprobes_optimization_handler, .extra1 = &zero, .extra2 = &one, }, #endif { } }; static struct ctl_table dev_table[] = { { } }; int __init sysctl_init(void) { struct ctl_table_header *hdr; hdr = register_sysctl_table(sysctl_base_table); kmemleak_not_leak(hdr); return 0; } #endif /* CONFIG_SYSCTL */ /* * /proc/sys support */ #ifdef CONFIG_PROC_SYSCTL static int _proc_do_string(char *data, int maxlen, int write, char __user *buffer, size_t *lenp, loff_t *ppos) { size_t len; char __user *p; char c; if (!data || !maxlen || !*lenp) { *lenp = 0; return 0; } if (write) { if (sysctl_writes_strict == SYSCTL_WRITES_STRICT) { /* Only continue writes not past the end of buffer. */ len = strlen(data); if (len > maxlen - 1) len = maxlen - 1; if (*ppos > len) return 0; len = *ppos; } else { /* Start writing from beginning of buffer. */ len = 0; } *ppos += *lenp; p = buffer; while ((p - buffer) < *lenp && len < maxlen - 1) { if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; data[len++] = c; } data[len] = 0; } else { len = strlen(data); if (len > maxlen) len = maxlen; if (*ppos > len) { *lenp = 0; return 0; } data += *ppos; len -= *ppos; if (len > *lenp) len = *lenp; if (len) if (copy_to_user(buffer, data, len)) return -EFAULT; if (len < *lenp) { if (put_user('\n', buffer + len)) return -EFAULT; len++; } *lenp = len; *ppos += len; } return 0; } static void warn_sysctl_write(struct ctl_table *table) { pr_warn_once("%s wrote to %s when file position was not 0!\n" "This will not be supported in the future. To silence this\n" "warning, set kernel.sysctl_writes_strict = -1\n", current->comm, table->procname); } /** * proc_dostring - read a string sysctl * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes a string from/to the user buffer. If the kernel * buffer provided is not large enough to hold the string, the * string is truncated. The copied string is %NULL-terminated. * If the string is being read by the user process, it is copied * and a newline '\n' is added. It is truncated if the buffer is * not large enough. * * Returns 0 on success. 
*/ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && *ppos && sysctl_writes_strict == SYSCTL_WRITES_WARN) warn_sysctl_write(table); return _proc_do_string((char *)(table->data), table->maxlen, write, (char __user *)buffer, lenp, ppos); } static size_t proc_skip_spaces(char **buf) { size_t ret; char *tmp = skip_spaces(*buf); ret = tmp - *buf; *buf = tmp; return ret; } static void proc_skip_char(char **buf, size_t *size, const char v) { while (*size) { if (**buf != v) break; (*size)--; (*buf)++; } } #define TMPBUFLEN 22 /** * proc_get_long - reads an ASCII formatted integer from a user buffer * * @buf: a kernel buffer * @size: size of the kernel buffer * @val: this is where the number will be stored * @neg: set to %TRUE if number is negative * @perm_tr: a vector which contains the allowed trailers * @perm_tr_len: size of the perm_tr vector * @tr: pointer to store the trailer character * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes read. If @tr is non-NULL and a trailing * character exists (size is non-zero after returning from this * function), @tr is updated with the trailing character. */ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { int len; char *p, tmp[TMPBUFLEN]; if (!*size) return -EINVAL; len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; memcpy(tmp, *buf, len); tmp[len] = 0; p = tmp; if (*p == '-' && *size > 1) { *neg = true; p++; } else *neg = false; if (!isdigit(*p)) return -EINVAL; *val = simple_strtoul(p, &p, 0); len = p - tmp; /* We don't know if the next char is whitespace thus we may accept * invalid integers (e.g. 1234...a) or two integers instead of one * (e.g. 123...1). So lets not allow such large numbers. */ if (len == TMPBUFLEN - 1) return -EINVAL; if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len)) return -EINVAL; if (tr && (len < *size)) *tr = *p; *buf += len; *size -= len; return 0; } /** * proc_put_long - converts an integer to a decimal ASCII formatted string * * @buf: the user buffer * @size: the size of the user buffer * @val: the integer to be converted * @neg: sign of the number, %TRUE for negative * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes written. */ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, bool neg) { int len; char tmp[TMPBUFLEN], *p = tmp; sprintf(p, "%s%lu", neg ? 
"-" : "", val); len = strlen(tmp); if (len > *size) len = *size; if (copy_to_user(*buf, tmp, len)) return -EFAULT; *size -= len; *buf += len; return 0; } #undef TMPBUFLEN static int proc_put_char(void __user **buf, size_t *size, char c) { if (*size) { char __user **buffer = (char __user **)buf; if (put_user(c, *buffer)) return -EFAULT; (*size)--, (*buffer)++; *buf = *buffer; } return 0; } static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*negp) { if (*lvalp > (unsigned long) INT_MAX + 1) return -EINVAL; *valp = -*lvalp; } else { if (*lvalp > (unsigned long) INT_MAX) return -EINVAL; *valp = *lvalp; } } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { int *i, vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (int *) tbl_data; vleft = table->maxlen / sizeof(*i); left = *lenp; if (!conv) conv = do_proc_dointvec_conv; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first=0) { unsigned long lval; bool neg; if (write) { left -= proc_skip_spaces(&p); if (!left) break; err = proc_get_long(&p, &left, &lval, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (conv(&neg, &lval, i, 1, data)) { err = -EINVAL; break; } } else { if (conv(&neg, &lval, i, 0, data)) { err = -EINVAL; break; } if (!first) err = proc_put_char(&buffer, &left, '\t'); if (err) break; err = proc_put_long(&buffer, &left, lval, neg); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err && left) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { return __do_proc_dointvec(table->data, table, write, buffer, lenp, ppos, conv, data); } /** * proc_dointvec - read a vector of integers * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * Returns 0 on success. */ int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, NULL,NULL); } /* * Taint values can only be increased * This means we can safely use a temporary. 
*/ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; unsigned long tmptaint = get_taint(); int err; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; t = *table; t.data = &tmptaint; err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); if (err < 0) return err; if (write) { /* * Poor man's atomic or. Not worth adding a primitive * to everyone's atomic.h for this */ int i; for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) { if ((tmptaint >> i) & 1) add_taint(i, LOCKDEP_STILL_OK); } } return err; } #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } #endif struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { struct do_proc_dointvec_minmax_conv_param *param = data; if (write) { int val = *negp ? -*lvalp : *lvalp; if ((param->min && *param->min > val) || (param->max && *param->max < val)) return -EINVAL; *valp = val; } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } /** * proc_dointvec_minmax - read a vector of integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct do_proc_dointvec_minmax_conv_param param = { .min = (int *) table->extra1, .max = (int *) table->extra2, }; return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_minmax_conv, &param); } static void validate_coredump_safety(void) { #ifdef CONFIG_COREDUMP if (suid_dumpable == SUID_DUMP_ROOT && core_pattern[0] != '/' && core_pattern[0] != '|') { printk(KERN_WARNING "Unsafe core_pattern used with "\ "suid_dumpable=2. 
Pipe handler or fully qualified "\ "core dump path required.\n"); } #endif } static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dostring(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #endif static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { unsigned long *i, *min, *max; int vleft, first = 1, err = 0; size_t left; char *kbuf = NULL, *p; if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (unsigned long *) data; min = (unsigned long *) table->extra1; max = (unsigned long *) table->extra2; vleft = table->maxlen / sizeof(unsigned long); left = *lenp; if (write) { if (*ppos) { switch (sysctl_writes_strict) { case SYSCTL_WRITES_STRICT: goto out; case SYSCTL_WRITES_WARN: warn_sysctl_write(table); break; default: break; } } if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); } for (; left && vleft--; i++, first = 0) { unsigned long val; if (write) { bool neg; left -= proc_skip_spaces(&p); err = proc_get_long(&p, &left, &val, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (neg) continue; if ((min && val < *min) || (max && val > *max)) continue; *i = val; } else { val = convdiv * (*i) / convmul; if (!first) { err = proc_put_char(&buffer, &left, '\t'); if (err) break; } err = proc_put_long(&buffer, &left, val, false); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err) left -= proc_skip_spaces(&p); if (write) { kfree(kbuf); if (first) return err ? : -EINVAL; } *lenp -= left; out: *ppos += *lenp; return err; } static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { return __do_proc_doulongvec_minmax(table->data, table, write, buffer, lenp, ppos, convmul, convdiv); } /** * proc_doulongvec_minmax - read a vector of long integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. 
*/ int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); } /** * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. The values * are treated as milliseconds, and converted to jiffies when they are stored. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, HZ, 1000l); } static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*lvalp > LONG_MAX / HZ) return 1; *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = lval / HZ; } return 0; } static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (USER_HZ < HZ && *lvalp > (LONG_MAX / HZ) * USER_HZ) return 1; *valp = clock_t_to_jiffies(*negp ? -*lvalp : *lvalp); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_clock_t(lval); } return 0; } static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); if (jif > INT_MAX) return 1; *valp = (int)jif; } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = -(unsigned long)val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_msecs(lval); } return 0; } /** * proc_dointvec_jiffies - read a vector of integers as seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in seconds, and are converted into * jiffies. * * Returns 0 on success. */ int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_jiffies_conv,NULL); } /** * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: pointer to the file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/USER_HZ seconds, and * are converted into jiffies. * * Returns 0 on success. 
*/ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_userhz_jiffies_conv,NULL); } /** * proc_dointvec_ms_jiffies - read a vector of integers as milliseconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/1000 seconds, and * are converted into jiffies. * * Returns 0 on success. */ int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_ms_jiffies_conv, NULL); } static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp; int r; tmp = pid_vnr(cad_pid); r = __do_proc_dointvec(&tmp, table, write, buffer, lenp, ppos, NULL, NULL); if (r || !write) return r; new_pid = find_get_pid(tmp); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } /** * proc_do_large_bitmap - read/write from/to a large bitmap * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * The bitmap is stored at table->data and the bitmap length (in bits) * in table->maxlen. * * We use a range comma separated format (e.g. 1,3-4,10-10) so that * large bitmaps may be represented in a compact manner. Writing into * the file will clear the bitmap then update it with the given input. * * Returns 0 on success. 
*/ int proc_do_large_bitmap(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err = 0; bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { char *kbuf, *p; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; p = kbuf = memdup_user_nul(buffer, left); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long), GFP_KERNEL); if (!tmp_bitmap) { kfree(kbuf); return -ENOMEM; } proc_skip_char(&p, &left, '\n'); while (!err && left) { unsigned long val_a, val_b; bool neg; err = proc_get_long(&p, &left, &val_a, &neg, tr_a, sizeof(tr_a), &c); if (err) break; if (val_a >= bitmap_len || neg) { err = -EINVAL; break; } val_b = val_a; if (left) { p++; left--; } if (c == '-') { err = proc_get_long(&p, &left, &val_b, &neg, tr_b, sizeof(tr_b), &c); if (err) break; if (val_b >= bitmap_len || neg || val_a > val_b) { err = -EINVAL; break; } if (left) { p++; left--; } } bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1); first = 0; proc_skip_char(&p, &left, '\n'); } kfree(kbuf); } else { unsigned long bit_a, bit_b = 0; while (left) { bit_a = find_next_bit(bitmap, bitmap_len, bit_b); if (bit_a >= bitmap_len) break; bit_b = find_next_zero_bit(bitmap, bitmap_len, bit_a + 1) - 1; if (!first) { err = proc_put_char(&buffer, &left, ','); if (err) break; } err = proc_put_long(&buffer, &left, bit_a, false); if (err) break; if (bit_a != bit_b) { err = proc_put_char(&buffer, &left, '-'); if (err) break; err = proc_put_long(&buffer, &left, bit_b, false); if (err) break; } first = 0; bit_b++; } if (!err) err = proc_put_char(&buffer, &left, '\n'); } if (!err) { if (write) { if (*ppos) bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len); else bitmap_copy(bitmap, tmp_bitmap, bitmap_len); } kfree(tmp_bitmap); *lenp -= left; *ppos += *lenp; return 0; } else { kfree(tmp_bitmap); return err; } } #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ /* * No sense putting this after each symbol definition, twice, * exception granted :-) */ EXPORT_SYMBOL(proc_dointvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); 
EXPORT_SYMBOL(proc_dostring); EXPORT_SYMBOL(proc_doulongvec_minmax); EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
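/*
 * Illustrative usage sketch: how a module might expose an integer knob
 * through the proc_dointvec_minmax() handler documented above.  All of
 * the names here (example_value, example_min, example_max, example_table,
 * the "dev/example" path) are hypothetical, and register_sysctl() /
 * unregister_sysctl_table() are assumed to be available from
 * <linux/sysctl.h> in the same kernel generation as this file.
 */
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value = 10;
static int example_min;		/* lower bound, 0 */
static int example_max = 100;	/* upper bound */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		/* extra1/extra2 are the int bounds checked by do_proc_dointvec_minmax_conv() */
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }	/* sentinel terminating the table */
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	/* Creates /proc/sys/dev/example/example_value backed by the table above. */
	example_header = register_sysctl("dev/example", example_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_sysctl_exit(void)
{
	if (example_header)
		unregister_sysctl_table(example_header);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");
/*
 * A write such as "echo 42 > /proc/sys/dev/example/example_value" then goes
 * through __do_proc_dointvec() with the min/max conversion shown earlier,
 * while out-of-range values are rejected with -EINVAL.
 */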
./CrossVul/dataset_final_sorted/CWE-399/c/good_4984_4
crossvul-cpp_data_bad_3471_2
/* * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. * It is modeled as a finite state machine to support both synchronous * and asynchronous requests. * * - RPC header generation and argument serialization. * - Credential refresh. * - TCP connect handling. * - Retry of operation when it is suspected the operation failed because * of uid squashing on the server, or when the credentials were stale * and need to be refreshed, or when a packet was damaged in transit. * This may have to be moved to the VFS layer. * * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> */ #include <asm/system.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <linux/mm.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/utsname.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/un.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/bc_xprt.h> #include "sunrpc.h" #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_CALL #endif #define dprint_status(t) \ dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ __func__, t->tk_status) /* * All RPC clients are linked into this list */ static LIST_HEAD(all_clients); static DEFINE_SPINLOCK(rpc_client_lock); static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); static void call_start(struct rpc_task *task); static void call_reserve(struct rpc_task *task); static void call_reserveresult(struct rpc_task *task); static void call_allocate(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); #if defined(CONFIG_NFS_V4_1) static void call_bc_transmit(struct rpc_task *task); #endif /* CONFIG_NFS_V4_1 */ static void call_status(struct rpc_task *task); static void call_transmit_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); static void call_refreshresult(struct rpc_task *task); static void call_timeout(struct rpc_task *task); static void call_connect(struct rpc_task *task); static void call_connect_status(struct rpc_task *task); static __be32 *rpc_encode_header(struct rpc_task *task); static __be32 *rpc_verify_header(struct rpc_task *task); static int rpc_ping(struct rpc_clnt *clnt); static void rpc_register_client(struct rpc_clnt *clnt) { spin_lock(&rpc_client_lock); list_add(&clnt->cl_clients, &all_clients); spin_unlock(&rpc_client_lock); } static void rpc_unregister_client(struct rpc_clnt *clnt) { spin_lock(&rpc_client_lock); list_del(&clnt->cl_clients); spin_unlock(&rpc_client_lock); } static int rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) { static uint32_t clntid; struct nameidata nd; struct path path; char name[15]; struct qstr q = { .name = name, }; int error; clnt->cl_path.mnt = ERR_PTR(-ENOENT); clnt->cl_path.dentry = ERR_PTR(-ENOENT); if (dir_name == NULL) return 0; path.mnt = rpc_get_mount(); if (IS_ERR(path.mnt)) return PTR_ERR(path.mnt); error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd); if (error) goto err; for (;;) { q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); name[sizeof(name) - 1] = '\0'; q.hash = full_name_hash(q.name, q.len); path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt); if (!IS_ERR(path.dentry)) break; error = 
PTR_ERR(path.dentry); if (error != -EEXIST) { printk(KERN_INFO "RPC: Couldn't create pipefs entry" " %s/%s, error %d\n", dir_name, name, error); goto err_path_put; } } path_put(&nd.path); clnt->cl_path = path; return 0; err_path_put: path_put(&nd.path); err: rpc_put_mount(); return error; } static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) { struct rpc_program *program = args->program; struct rpc_version *version; struct rpc_clnt *clnt = NULL; struct rpc_auth *auth; int err; size_t len; /* sanity check the name before trying to print it */ err = -EINVAL; len = strlen(args->servername); if (len > RPC_MAXNETNAMELEN) goto out_no_rpciod; len++; dprintk("RPC: creating %s client for %s (xprt %p)\n", program->name, args->servername, xprt); err = rpciod_up(); if (err) goto out_no_rpciod; err = -EINVAL; if (!xprt) goto out_no_xprt; if (args->version >= program->nrvers) goto out_err; version = program->version[args->version]; if (version == NULL) goto out_err; err = -ENOMEM; clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); if (!clnt) goto out_err; clnt->cl_parent = clnt; clnt->cl_server = clnt->cl_inline_name; if (len > sizeof(clnt->cl_inline_name)) { char *buf = kmalloc(len, GFP_KERNEL); if (buf != NULL) clnt->cl_server = buf; else len = sizeof(clnt->cl_inline_name); } strlcpy(clnt->cl_server, args->servername, len); clnt->cl_xprt = xprt; clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; clnt->cl_protname = program->name; clnt->cl_prog = args->prognumber ? : program->number; clnt->cl_vers = version->number; clnt->cl_stats = program->stats; clnt->cl_metrics = rpc_alloc_iostats(clnt); err = -ENOMEM; if (clnt->cl_metrics == NULL) goto out_no_stats; clnt->cl_program = program; INIT_LIST_HEAD(&clnt->cl_tasks); spin_lock_init(&clnt->cl_lock); if (!xprt_bound(clnt->cl_xprt)) clnt->cl_autobind = 1; clnt->cl_timeout = xprt->timeout; if (args->timeout != NULL) { memcpy(&clnt->cl_timeout_default, args->timeout, sizeof(clnt->cl_timeout_default)); clnt->cl_timeout = &clnt->cl_timeout_default; } clnt->cl_rtt = &clnt->cl_rtt_default; rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); clnt->cl_principal = NULL; if (args->client_name) { clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL); if (!clnt->cl_principal) goto out_no_principal; } atomic_set(&clnt->cl_count, 1); err = rpc_setup_pipedir(clnt, program->pipe_dir_name); if (err < 0) goto out_no_path; auth = rpcauth_create(args->authflavor, clnt); if (IS_ERR(auth)) { printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", args->authflavor); err = PTR_ERR(auth); goto out_no_auth; } /* save the nodename */ clnt->cl_nodelen = strlen(init_utsname()->nodename); if (clnt->cl_nodelen > UNX_MAXNODENAME) clnt->cl_nodelen = UNX_MAXNODENAME; memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen); rpc_register_client(clnt); return clnt; out_no_auth: if (!IS_ERR(clnt->cl_path.dentry)) { rpc_remove_client_dir(clnt->cl_path.dentry); rpc_put_mount(); } out_no_path: kfree(clnt->cl_principal); out_no_principal: rpc_free_iostats(clnt->cl_metrics); out_no_stats: if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); kfree(clnt); out_err: xprt_put(xprt); out_no_xprt: rpciod_down(); out_no_rpciod: return ERR_PTR(err); } /* * rpc_create - create an RPC client and transport with one call * @args: rpc_clnt create argument structure * * Creates and initializes an RPC transport and an RPC client. 
* * It can ping the server in order to determine if it is up, and to see if * it supports this program and version. RPC_CLNT_CREATE_NOPING disables * this behavior so asynchronous tasks can also use rpc_create. */ struct rpc_clnt *rpc_create(struct rpc_create_args *args) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; struct xprt_create xprtargs = { .net = args->net, .ident = args->protocol, .srcaddr = args->saddress, .dstaddr = args->address, .addrlen = args->addrsize, .bc_xprt = args->bc_xprt, }; char servername[48]; /* * If the caller chooses not to specify a hostname, whip * up a string representation of the passed-in address. */ if (args->servername == NULL) { struct sockaddr_un *sun = (struct sockaddr_un *)args->address; struct sockaddr_in *sin = (struct sockaddr_in *)args->address; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)args->address; servername[0] = '\0'; switch (args->address->sa_family) { case AF_LOCAL: snprintf(servername, sizeof(servername), "%s", sun->sun_path); break; case AF_INET: snprintf(servername, sizeof(servername), "%pI4", &sin->sin_addr.s_addr); break; case AF_INET6: snprintf(servername, sizeof(servername), "%pI6", &sin6->sin6_addr); break; default: /* caller wants default server name, but * address family isn't recognized. */ return ERR_PTR(-EINVAL); } args->servername = servername; } xprt = xprt_create_transport(&xprtargs); if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; /* * By default, kernel RPC client connects from a reserved port. * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, * but it is always enabled for rpciod, which handles the connect * operation. */ xprt->resvport = 1; if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) xprt->resvport = 0; clnt = rpc_new_client(args, xprt); if (IS_ERR(clnt)) return clnt; if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { int err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); return ERR_PTR(err); } } clnt->cl_softrtry = 1; if (args->flags & RPC_CLNT_CREATE_HARDRTRY) clnt->cl_softrtry = 0; if (args->flags & RPC_CLNT_CREATE_AUTOBIND) clnt->cl_autobind = 1; if (args->flags & RPC_CLNT_CREATE_DISCRTRY) clnt->cl_discrtry = 1; if (!(args->flags & RPC_CLNT_CREATE_QUIET)) clnt->cl_chatty = 1; return clnt; } EXPORT_SYMBOL_GPL(rpc_create); /* * This function clones the RPC client structure. It allows us to share the * same transport while varying parameters such as the authentication * flavour. 
*/ struct rpc_clnt * rpc_clone_client(struct rpc_clnt *clnt) { struct rpc_clnt *new; int err = -ENOMEM; new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); if (!new) goto out_no_clnt; new->cl_parent = clnt; /* Turn off autobind on clones */ new->cl_autobind = 0; INIT_LIST_HEAD(&new->cl_tasks); spin_lock_init(&new->cl_lock); rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval); new->cl_metrics = rpc_alloc_iostats(clnt); if (new->cl_metrics == NULL) goto out_no_stats; if (clnt->cl_principal) { new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); if (new->cl_principal == NULL) goto out_no_principal; } atomic_set(&new->cl_count, 1); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); xprt_get(clnt->cl_xprt); atomic_inc(&clnt->cl_count); rpc_register_client(new); rpciod_up(); return new; out_no_path: kfree(new->cl_principal); out_no_principal: rpc_free_iostats(new->cl_metrics); out_no_stats: kfree(new); out_no_clnt: dprintk("RPC: %s: returned error %d\n", __func__, err); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(rpc_clone_client); /* * Kill all tasks for the given client. * XXX: kill their descendants as well? */ void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; if (list_empty(&clnt->cl_tasks)) return; dprintk("RPC: killing all tasks for client %p\n", clnt); /* * Spin lock all_tasks to prevent changes... */ spin_lock(&clnt->cl_lock); list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { if (!RPC_IS_ACTIVATED(rovr)) continue; if (!(rovr->tk_flags & RPC_TASK_KILLED)) { rovr->tk_flags |= RPC_TASK_KILLED; rpc_exit(rovr, -EIO); if (RPC_IS_QUEUED(rovr)) rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); } } spin_unlock(&clnt->cl_lock); } EXPORT_SYMBOL_GPL(rpc_killall_tasks); /* * Properly shut down an RPC client, terminating all outstanding * requests. */ void rpc_shutdown_client(struct rpc_clnt *clnt) { dprintk("RPC: shutting down %s client for %s\n", clnt->cl_protname, clnt->cl_server); while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, list_empty(&clnt->cl_tasks), 1*HZ); } rpc_release_client(clnt); } EXPORT_SYMBOL_GPL(rpc_shutdown_client); /* * Free an RPC client */ static void rpc_free_client(struct rpc_clnt *clnt) { dprintk("RPC: destroying %s client for %s\n", clnt->cl_protname, clnt->cl_server); if (!IS_ERR(clnt->cl_path.dentry)) { rpc_remove_client_dir(clnt->cl_path.dentry); rpc_put_mount(); } if (clnt->cl_parent != clnt) { rpc_release_client(clnt->cl_parent); goto out_free; } if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); out_free: rpc_unregister_client(clnt); rpc_free_iostats(clnt->cl_metrics); kfree(clnt->cl_principal); clnt->cl_metrics = NULL; xprt_put(clnt->cl_xprt); rpciod_down(); kfree(clnt); } /* * Free an RPC client */ static void rpc_free_auth(struct rpc_clnt *clnt) { if (clnt->cl_auth == NULL) { rpc_free_client(clnt); return; } /* * Note: RPCSEC_GSS may need to send NULL RPC calls in order to * release remaining GSS contexts. This mechanism ensures * that it can do so safely. 
*/ atomic_inc(&clnt->cl_count); rpcauth_release(clnt->cl_auth); clnt->cl_auth = NULL; if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_client(clnt); } /* * Release reference to the RPC client */ void rpc_release_client(struct rpc_clnt *clnt) { dprintk("RPC: rpc_release_client(%p)\n", clnt); if (list_empty(&clnt->cl_tasks)) wake_up(&destroy_wait); if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_auth(clnt); } /** * rpc_bind_new_program - bind a new RPC program to an existing client * @old: old rpc_client * @program: rpc program to set * @vers: rpc program version * * Clones the rpc client and sets up a new RPC program. This is mainly * of use for enabling different RPC programs to share the same transport. * The Sun NFSv2/v3 ACL protocol can do this. */ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, struct rpc_program *program, u32 vers) { struct rpc_clnt *clnt; struct rpc_version *version; int err; BUG_ON(vers >= program->nrvers || !program->version[vers]); version = program->version[vers]; clnt = rpc_clone_client(old); if (IS_ERR(clnt)) goto out; clnt->cl_procinfo = version->procs; clnt->cl_maxproc = version->nrprocs; clnt->cl_protname = program->name; clnt->cl_prog = program->number; clnt->cl_vers = version->number; clnt->cl_stats = program->stats; err = rpc_ping(clnt); if (err != 0) { rpc_shutdown_client(clnt); clnt = ERR_PTR(err); } out: return clnt; } EXPORT_SYMBOL_GPL(rpc_bind_new_program); void rpc_task_release_client(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; if (clnt != NULL) { /* Remove from client task list */ spin_lock(&clnt->cl_lock); list_del(&task->tk_task); spin_unlock(&clnt->cl_lock); task->tk_client = NULL; rpc_release_client(clnt); } } static void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) { if (clnt != NULL) { rpc_task_release_client(task); task->tk_client = clnt; atomic_inc(&clnt->cl_count); if (clnt->cl_softrtry) task->tk_flags |= RPC_TASK_SOFT; /* Add to the client's list of all tasks */ spin_lock(&clnt->cl_lock); list_add_tail(&task->tk_task, &clnt->cl_tasks); spin_unlock(&clnt->cl_lock); } } void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt) { rpc_task_release_client(task); rpc_task_set_client(task, clnt); } EXPORT_SYMBOL_GPL(rpc_task_reset_client); static void rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) { if (msg != NULL) { task->tk_msg.rpc_proc = msg->rpc_proc; task->tk_msg.rpc_argp = msg->rpc_argp; task->tk_msg.rpc_resp = msg->rpc_resp; if (msg->rpc_cred != NULL) task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred); } } /* * Default callback for async RPC calls */ static void rpc_default_callback(struct rpc_task *task, void *data) { } static const struct rpc_call_ops rpc_default_ops = { .rpc_call_done = rpc_default_callback, }; /** * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it * @task_setup_data: pointer to task initialisation data */ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) { struct rpc_task *task; task = rpc_new_task(task_setup_data); if (IS_ERR(task)) goto out; rpc_task_set_client(task, task_setup_data->rpc_client); rpc_task_set_rpc_message(task, task_setup_data->rpc_message); if (task->tk_action == NULL) rpc_call_start(task); atomic_inc(&task->tk_count); rpc_execute(task); out: return task; } EXPORT_SYMBOL_GPL(rpc_run_task); /** * rpc_call_sync - Perform a synchronous RPC call * @clnt: pointer to RPC client * @msg: RPC call parameters * @flags: RPC call flags */ int 
rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = &rpc_default_ops, .flags = flags, }; int status; BUG_ON(flags & RPC_TASK_ASYNC); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } EXPORT_SYMBOL_GPL(rpc_call_sync); /** * rpc_call_async - Perform an asynchronous RPC call * @clnt: pointer to RPC client * @msg: RPC call parameters * @flags: RPC call flags * @tk_ops: RPC call ops * @data: user call data */ int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *data) { struct rpc_task *task; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = msg, .callback_ops = tk_ops, .callback_data = data, .flags = flags|RPC_TASK_ASYNC, }; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } EXPORT_SYMBOL_GPL(rpc_call_async); #if defined(CONFIG_NFS_V4_1) /** * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run * rpc_execute against it * @req: RPC request * @tk_ops: RPC call ops */ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, const struct rpc_call_ops *tk_ops) { struct rpc_task *task; struct xdr_buf *xbufp = &req->rq_snd_buf; struct rpc_task_setup task_setup_data = { .callback_ops = tk_ops, }; dprintk("RPC: rpc_run_bc_task req= %p\n", req); /* * Create an rpc_task to send the data */ task = rpc_new_task(&task_setup_data); if (IS_ERR(task)) { xprt_free_bc_request(req); goto out; } task->tk_rqstp = req; /* * Set up the xdr_buf length. * This also indicates that the buffer is XDR encoded already. */ xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + xbufp->tail[0].iov_len; task->tk_action = call_bc_transmit; atomic_inc(&task->tk_count); BUG_ON(atomic_read(&task->tk_count) != 2); rpc_execute(task); out: dprintk("RPC: rpc_run_bc_task: task= %p\n", task); return task; } #endif /* CONFIG_NFS_V4_1 */ void rpc_call_start(struct rpc_task *task) { task->tk_action = call_start; } EXPORT_SYMBOL_GPL(rpc_call_start); /** * rpc_peeraddr - extract remote peer address from clnt's xprt * @clnt: RPC client structure * @buf: target buffer * @bufsize: length of target buffer * * Returns the number of bytes that are actually in the stored address. 
*/ size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) { size_t bytes; struct rpc_xprt *xprt = clnt->cl_xprt; bytes = sizeof(xprt->addr); if (bytes > bufsize) bytes = bufsize; memcpy(buf, &clnt->cl_xprt->addr, bytes); return xprt->addrlen; } EXPORT_SYMBOL_GPL(rpc_peeraddr); /** * rpc_peeraddr2str - return remote peer address in printable format * @clnt: RPC client structure * @format: address format * */ const char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) { struct rpc_xprt *xprt = clnt->cl_xprt; if (xprt->address_strings[format] != NULL) return xprt->address_strings[format]; else return "unprintable"; } EXPORT_SYMBOL_GPL(rpc_peeraddr2str); void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt = clnt->cl_xprt; if (xprt->ops->set_buffer_size) xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); } EXPORT_SYMBOL_GPL(rpc_setbufsize); /* * Return size of largest payload RPC client can support, in bytes * * For stream transports, this is one RPC record fragment (see RFC * 1831), as we don't support multi-record requests yet. For datagram * transports, this is the size of an IP packet minus the IP, UDP, and * RPC header sizes. */ size_t rpc_max_payload(struct rpc_clnt *clnt) { return clnt->cl_xprt->max_payload; } EXPORT_SYMBOL_GPL(rpc_max_payload); /** * rpc_force_rebind - force transport to check that remote port is unchanged * @clnt: client to rebind * */ void rpc_force_rebind(struct rpc_clnt *clnt) { if (clnt->cl_autobind) xprt_clear_bound(clnt->cl_xprt); } EXPORT_SYMBOL_GPL(rpc_force_rebind); /* * Restart an (async) RPC call from the call_prepare state. * Usually called from within the exit handler. */ int rpc_restart_call_prepare(struct rpc_task *task) { if (RPC_ASSASSINATED(task)) return 0; task->tk_action = rpc_prepare_task; return 1; } EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); /* * Restart an (async) RPC call. Usually called from within the * exit handler. */ int rpc_restart_call(struct rpc_task *task) { if (RPC_ASSASSINATED(task)) return 0; task->tk_action = call_start; return 1; } EXPORT_SYMBOL_GPL(rpc_restart_call); #ifdef RPC_DEBUG static const char *rpc_proc_name(const struct rpc_task *task) { const struct rpc_procinfo *proc = task->tk_msg.rpc_proc; if (proc) { if (proc->p_name) return proc->p_name; else return "NULL"; } else return "no proc"; } #endif /* * 0. Initial state * * Other FSM states can be visited zero or more times, but * this state is visited exactly once for each RPC. */ static void call_start(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), (RPC_IS_ASYNC(task) ? "async" : "sync")); /* Increment call count */ task->tk_msg.rpc_proc->p_count++; clnt->cl_stats->rpccnt++; task->tk_action = call_reserve; } /* * 1. Reserve an RPC call slot */ static void call_reserve(struct rpc_task *task) { dprint_status(task); task->tk_status = 0; task->tk_action = call_reserveresult; xprt_reserve(task); } /* * 1b. Grok the result of xprt_reserve() */ static void call_reserveresult(struct rpc_task *task) { int status = task->tk_status; dprint_status(task); /* * After a call to xprt_reserve(), we must have either * a request slot or else an error status. 
*/ task->tk_status = 0; if (status >= 0) { if (task->tk_rqstp) { task->tk_action = call_refresh; return; } printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", __func__, status); rpc_exit(task, -EIO); return; } /* * Even though there was an error, we may have acquired * a request slot somehow. Make sure not to leak it. */ if (task->tk_rqstp) { printk(KERN_ERR "%s: status=%d, request allocated anyway\n", __func__, status); xprt_release(task); } switch (status) { case -EAGAIN: /* woken up; retry */ task->tk_action = call_reserve; return; case -EIO: /* probably a shutdown */ break; default: printk(KERN_ERR "%s: unrecognized error %d, exiting\n", __func__, status); break; } rpc_exit(task, status); } /* * 2. Bind and/or refresh the credentials */ static void call_refresh(struct rpc_task *task) { dprint_status(task); task->tk_action = call_refreshresult; task->tk_status = 0; task->tk_client->cl_stats->rpcauthrefresh++; rpcauth_refreshcred(task); } /* * 2a. Process the results of a credential refresh */ static void call_refreshresult(struct rpc_task *task) { int status = task->tk_status; dprint_status(task); task->tk_status = 0; task->tk_action = call_refresh; switch (status) { case 0: if (rpcauth_uptodatecred(task)) task->tk_action = call_allocate; return; case -ETIMEDOUT: rpc_delay(task, 3*HZ); case -EAGAIN: status = -EACCES; if (!task->tk_cred_retry) break; task->tk_cred_retry--; dprintk("RPC: %5u %s: retry refresh creds\n", task->tk_pid, __func__); return; } dprintk("RPC: %5u %s: refresh creds failed with error %d\n", task->tk_pid, __func__, status); rpc_exit(task, status); } /* * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc. * (Note: buffer memory is freed in xprt_release). */ static void call_allocate(struct rpc_task *task) { unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = task->tk_xprt; struct rpc_procinfo *proc = task->tk_msg.rpc_proc; dprint_status(task); task->tk_status = 0; task->tk_action = call_bind; if (req->rq_buffer) return; if (proc->p_proc != 0) { BUG_ON(proc->p_arglen == 0); if (proc->p_decode != NULL) BUG_ON(proc->p_replen == 0); } /* * Calculate the size (in quads) of the RPC call * and reply headers, and convert both values * to byte sizes. */ req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen; req->rq_callsize <<= 2; req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; req->rq_rcvsize <<= 2; req->rq_buffer = xprt->ops->buf_alloc(task, req->rq_callsize + req->rq_rcvsize); if (req->rq_buffer != NULL) return; dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); if (RPC_IS_ASYNC(task) || !signalled()) { task->tk_action = call_allocate; rpc_delay(task, HZ>>4); return; } rpc_exit(task, -ERESTARTSYS); } static inline int rpc_task_need_encode(struct rpc_task *task) { return task->tk_rqstp->rq_snd_buf.len == 0; } static inline void rpc_task_force_reencode(struct rpc_task *task) { task->tk_rqstp->rq_snd_buf.len = 0; task->tk_rqstp->rq_bytes_sent = 0; } static inline void rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) { buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; buf->page_len = 0; buf->flags = 0; buf->len = 0; buf->buflen = len; } /* * 3. 
Encode arguments of an RPC call */ static void rpc_xdr_encode(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; kxdreproc_t encode; __be32 *p; dprint_status(task); rpc_xdr_buf_init(&req->rq_snd_buf, req->rq_buffer, req->rq_callsize); rpc_xdr_buf_init(&req->rq_rcv_buf, (char *)req->rq_buffer + req->rq_callsize, req->rq_rcvsize); p = rpc_encode_header(task); if (p == NULL) { printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); rpc_exit(task, -EIO); return; } encode = task->tk_msg.rpc_proc->p_encode; if (encode == NULL) return; task->tk_status = rpcauth_wrap_req(task, encode, req, p, task->tk_msg.rpc_argp); } /* * 4. Get the server port number if not yet set */ static void call_bind(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; dprint_status(task); task->tk_action = call_connect; if (!xprt_bound(xprt)) { task->tk_action = call_bind_status; task->tk_timeout = xprt->bind_timeout; xprt->ops->rpcbind(task); } } /* * 4a. Sort out bind result */ static void call_bind_status(struct rpc_task *task) { int status = -EIO; if (task->tk_status >= 0) { dprint_status(task); task->tk_status = 0; task->tk_action = call_connect; return; } switch (task->tk_status) { case -ENOMEM: dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); rpc_delay(task, HZ >> 2); goto retry_timeout; case -EACCES: dprintk("RPC: %5u remote rpcbind: RPC program/version " "unavailable\n", task->tk_pid); /* fail immediately if this is an RPC ping */ if (task->tk_msg.rpc_proc->p_proc == 0) { status = -EOPNOTSUPP; break; } rpc_delay(task, 3*HZ); goto retry_timeout; case -ETIMEDOUT: dprintk("RPC: %5u rpcbind request timed out\n", task->tk_pid); goto retry_timeout; case -EPFNOSUPPORT: /* server doesn't support any rpcbind version we know of */ dprintk("RPC: %5u unrecognized remote rpcbind service\n", task->tk_pid); break; case -EPROTONOSUPPORT: dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", task->tk_pid); task->tk_status = 0; task->tk_action = call_bind; return; case -ECONNREFUSED: /* connection problems */ case -ECONNRESET: case -ENOTCONN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPIPE: dprintk("RPC: %5u remote rpcbind unreachable: %d\n", task->tk_pid, task->tk_status); if (!RPC_IS_SOFTCONN(task)) { rpc_delay(task, 5*HZ); goto retry_timeout; } status = task->tk_status; break; default: dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", task->tk_pid, -task->tk_status); } rpc_exit(task, status); return; retry_timeout: task->tk_action = call_timeout; } /* * 4b. Connect to the RPC server */ static void call_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; dprintk("RPC: %5u call_connect xprt %p %s connected\n", task->tk_pid, xprt, (xprt_connected(xprt) ? "is" : "is not")); task->tk_action = call_transmit; if (!xprt_connected(xprt)) { task->tk_action = call_connect_status; if (task->tk_status < 0) return; xprt_connect(task); } } /* * 4c. Sort out connect result */ static void call_connect_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; dprint_status(task); task->tk_status = 0; if (status >= 0 || status == -EAGAIN) { clnt->cl_stats->netreconn++; task->tk_action = call_transmit; return; } switch (status) { /* if soft mounted, test if we've timed out */ case -ETIMEDOUT: task->tk_action = call_timeout; break; default: rpc_exit(task, -EIO); } } /* * 5. 
Transmit the RPC request, and wait for reply */ static void call_transmit(struct rpc_task *task) { dprint_status(task); task->tk_action = call_status; if (task->tk_status < 0) return; task->tk_status = xprt_prepare_transmit(task); if (task->tk_status != 0) return; task->tk_action = call_transmit_status; /* Encode here so that rpcsec_gss can use correct sequence number. */ if (rpc_task_need_encode(task)) { BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); rpc_xdr_encode(task); /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? */ if (task->tk_status == -EAGAIN) rpc_delay(task, HZ >> 4); else rpc_exit(task, task->tk_status); return; } } xprt_transmit(task); if (task->tk_status < 0) return; /* * On success, ensure that we call xprt_end_transmit() before sleeping * in order to allow access to the socket to other RPC requests. */ call_transmit_status(task); if (rpc_reply_expected(task)) return; task->tk_action = rpc_exit_task; rpc_wake_up_queued_task(&task->tk_xprt->pending, task); } /* * 5a. Handle cleanup after a transmission */ static void call_transmit_status(struct rpc_task *task) { task->tk_action = call_status; /* * Common case: success. Force the compiler to put this * test first. */ if (task->tk_status == 0) { xprt_end_transmit(task); rpc_task_force_reencode(task); return; } switch (task->tk_status) { case -EAGAIN: break; default: dprint_status(task); xprt_end_transmit(task); rpc_task_force_reencode(task); break; /* * Special cases: if we've been waiting on the * socket's write_space() callback, or if the * socket just returned a connection error, * then hold onto the transport lock. */ case -ECONNREFUSED: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: if (RPC_IS_SOFTCONN(task)) { xprt_end_transmit(task); rpc_exit(task, task->tk_status); break; } case -ECONNRESET: case -ENOTCONN: case -EPIPE: rpc_task_force_reencode(task); } } #if defined(CONFIG_NFS_V4_1) /* * 5b. Send the backchannel RPC reply. On error, drop the reply. In * addition, disconnect on connectivity errors. */ static void call_bc_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; BUG_ON(task->tk_status != 0); task->tk_status = xprt_prepare_transmit(task); if (task->tk_status == -EAGAIN) { /* * Could not reserve the transport. Try again after the * transport is released. */ task->tk_status = 0; task->tk_action = call_bc_transmit; return; } task->tk_action = rpc_exit_task; if (task->tk_status < 0) { printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); return; } xprt_transmit(task); xprt_end_transmit(task); dprint_status(task); switch (task->tk_status) { case 0: /* Success */ break; case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -ETIMEDOUT: /* * Problem reaching the server. Disconnect and let the * forechannel reestablish the connection. The server will * have to retransmit the backchannel request and we'll * reprocess it. Since these ops are idempotent, there's no * need to cache our reply at this time. */ printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); break; default: /* * We were unable to reply and will have to drop the * request. The server should reconnect and retransmit. 
*/ BUG_ON(task->tk_status == -EAGAIN); printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); break; } rpc_wake_up_queued_task(&req->rq_xprt->pending, task); } #endif /* CONFIG_NFS_V4_1 */ /* * 6. Sort out the RPC call status */ static void call_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; int status; if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent) task->tk_status = req->rq_reply_bytes_recvd; dprint_status(task); status = task->tk_status; if (status >= 0) { task->tk_action = call_decode; return; } task->tk_status = 0; switch(status) { case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: /* * Delay any retries for 3 seconds, then handle as if it * were a timeout. */ rpc_delay(task, 3*HZ); case -ETIMEDOUT: task->tk_action = call_timeout; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); break; case -ECONNRESET: case -ECONNREFUSED: rpc_force_rebind(clnt); rpc_delay(task, 3*HZ); case -EPIPE: case -ENOTCONN: task->tk_action = call_bind; break; case -EAGAIN: task->tk_action = call_transmit; break; case -EIO: /* shutdown or soft timeout */ rpc_exit(task, status); break; default: if (clnt->cl_chatty) printk("%s: RPC call returned error %d\n", clnt->cl_protname, -status); rpc_exit(task, status); } } /* * 6a. Handle RPC timeout * We do not release the request slot, so we keep using the * same XID for all retransmits. */ static void call_timeout(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; if (xprt_adjust_timeout(task->tk_rqstp) == 0) { dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); goto retry; } dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); task->tk_timeouts++; if (RPC_IS_SOFTCONN(task)) { rpc_exit(task, -ETIMEDOUT); return; } if (RPC_IS_SOFT(task)) { if (clnt->cl_chatty) printk(KERN_NOTICE "%s: server %s not responding, timed out\n", clnt->cl_protname, clnt->cl_server); if (task->tk_flags & RPC_TASK_TIMEOUT) rpc_exit(task, -ETIMEDOUT); else rpc_exit(task, -EIO); return; } if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task->tk_flags |= RPC_CALL_MAJORSEEN; if (clnt->cl_chatty) printk(KERN_NOTICE "%s: server %s not responding, still trying\n", clnt->cl_protname, clnt->cl_server); } rpc_force_rebind(clnt); /* * Did our request time out due to an RPCSEC_GSS out-of-sequence * event? RFC2203 requires the server to drop all such requests. */ rpcauth_invalcred(task); retry: clnt->cl_stats->rpcretrans++; task->tk_action = call_bind; task->tk_status = 0; } /* * 7. Decode the RPC reply */ static void call_decode(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode; __be32 *p; dprintk("RPC: %5u call_decode (status %d)\n", task->tk_pid, task->tk_status); if (task->tk_flags & RPC_CALL_MAJORSEEN) { if (clnt->cl_chatty) printk(KERN_NOTICE "%s: server %s OK\n", clnt->cl_protname, clnt->cl_server); task->tk_flags &= ~RPC_CALL_MAJORSEEN; } /* * Ensure that we see all writes made by xprt_complete_rqst() * before it changed req->rq_reply_bytes_recvd. 
*/ smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; /* Check that the softirq receive buffer is valid */ WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, sizeof(req->rq_rcv_buf)) != 0); if (req->rq_rcv_buf.len < 12) { if (!RPC_IS_SOFT(task)) { task->tk_action = call_bind; clnt->cl_stats->rpcretrans++; goto out_retry; } dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", clnt->cl_protname, task->tk_status); task->tk_action = call_timeout; goto out_retry; } p = rpc_verify_header(task); if (IS_ERR(p)) { if (p == ERR_PTR(-EAGAIN)) goto out_retry; return; } task->tk_action = rpc_exit_task; if (decode) { task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, task->tk_msg.rpc_resp); } dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, task->tk_status); return; out_retry: task->tk_status = 0; /* Note: rpc_verify_header() may have freed the RPC slot */ if (task->tk_rqstp == req) { req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie); } } static __be32 * rpc_encode_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; __be32 *p = req->rq_svec[0].iov_base; /* FIXME: check buffer size? */ p = xprt_skip_transport_header(task->tk_xprt, p); *p++ = req->rq_xid; /* XID */ *p++ = htonl(RPC_CALL); /* CALL */ *p++ = htonl(RPC_VERSION); /* RPC version */ *p++ = htonl(clnt->cl_prog); /* program number */ *p++ = htonl(clnt->cl_vers); /* program version */ *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ p = rpcauth_marshcred(task, p); req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); return p; } static __be32 * rpc_verify_header(struct rpc_task *task) { struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; int len = task->tk_rqstp->rq_rcv_buf.len >> 2; __be32 *p = iov->iov_base; u32 n; int error = -EACCES; if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { /* RFC-1014 says that the representation of XDR data must be a * multiple of four bytes * - if it isn't pointer subtraction in the NFS client may give * undefined results */ dprintk("RPC: %5u %s: XDR representation not a multiple of" " 4 bytes: 0x%x\n", task->tk_pid, __func__, task->tk_rqstp->rq_rcv_buf.len); goto out_eio; } if ((len -= 3) < 0) goto out_overflow; p += 1; /* skip XID */ if ((n = ntohl(*p++)) != RPC_REPLY) { dprintk("RPC: %5u %s: not an RPC reply: %x\n", task->tk_pid, __func__, n); goto out_garbage; } if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_ERROR: break; case RPC_MISMATCH: dprintk("RPC: %5u %s: RPC call version " "mismatch!\n", task->tk_pid, __func__); error = -EPROTONOSUPPORT; goto out_err; default: dprintk("RPC: %5u %s: RPC call rejected, " "unknown error: %x\n", task->tk_pid, __func__, n); goto out_eio; } if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_REJECTEDCRED: case RPC_AUTH_REJECTEDVERF: case RPCSEC_GSS_CREDPROBLEM: case RPCSEC_GSS_CTXPROBLEM: if (!task->tk_cred_retry) break; task->tk_cred_retry--; dprintk("RPC: %5u %s: retry stale creds\n", task->tk_pid, __func__); rpcauth_invalcred(task); /* Ensure we obtain a new XID! */ xprt_release(task); task->tk_action = call_reserve; goto out_retry; case RPC_AUTH_BADCRED: case RPC_AUTH_BADVERF: /* possibly garbled cred/verf? 
*/ if (!task->tk_garb_retry) break; task->tk_garb_retry--; dprintk("RPC: %5u %s: retry garbled creds\n", task->tk_pid, __func__); task->tk_action = call_bind; goto out_retry; case RPC_AUTH_TOOWEAK: printk(KERN_NOTICE "RPC: server %s requires stronger " "authentication.\n", task->tk_client->cl_server); break; default: dprintk("RPC: %5u %s: unknown auth error: %x\n", task->tk_pid, __func__, n); error = -EIO; } dprintk("RPC: %5u %s: call rejected %d\n", task->tk_pid, __func__, n); goto out_err; } if (!(p = rpcauth_checkverf(task, p))) { dprintk("RPC: %5u %s: auth check failed\n", task->tk_pid, __func__); goto out_garbage; /* bad verifier, retry */ } len = p - (__be32 *)iov->iov_base - 1; if (len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_SUCCESS: return p; case RPC_PROG_UNAVAIL: dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", task->tk_pid, __func__, (unsigned int)task->tk_client->cl_prog, task->tk_client->cl_server); error = -EPFNOSUPPORT; goto out_err; case RPC_PROG_MISMATCH: dprintk("RPC: %5u %s: program %u, version %u unsupported by " "server %s\n", task->tk_pid, __func__, (unsigned int)task->tk_client->cl_prog, (unsigned int)task->tk_client->cl_vers, task->tk_client->cl_server); error = -EPROTONOSUPPORT; goto out_err; case RPC_PROC_UNAVAIL: dprintk("RPC: %5u %s: proc %s unsupported by program %u, " "version %u on server %s\n", task->tk_pid, __func__, rpc_proc_name(task), task->tk_client->cl_prog, task->tk_client->cl_vers, task->tk_client->cl_server); error = -EOPNOTSUPP; goto out_err; case RPC_GARBAGE_ARGS: dprintk("RPC: %5u %s: server saw garbage\n", task->tk_pid, __func__); break; /* retry */ default: dprintk("RPC: %5u %s: server accept status: %x\n", task->tk_pid, __func__, n); /* Also retry */ } out_garbage: task->tk_client->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; dprintk("RPC: %5u %s: retrying\n", task->tk_pid, __func__); task->tk_action = call_bind; out_retry: return ERR_PTR(-EAGAIN); } out_eio: error = -EIO; out_err: rpc_exit(task, error); dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, __func__, error); return ERR_PTR(error); out_overflow: dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, __func__); goto out_garbage; } static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { } static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } static struct rpc_procinfo rpcproc_null = { .p_encode = rpcproc_encode_null, .p_decode = rpcproc_decode_null, }; static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; int err; msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); put_rpccred(msg.rpc_cred); return err; } struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = &msg, .callback_ops = &rpc_default_ops, .flags = flags, }; return rpc_run_task(&task_setup_data); } EXPORT_SYMBOL_GPL(rpc_call_null); #ifdef RPC_DEBUG static void rpc_show_header(void) { printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); printk(KERN_INFO 
"%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), task->tk_action, rpc_waitq); } void rpc_show_tasks(void) { struct rpc_clnt *clnt; struct rpc_task *task; int header = 0; spin_lock(&rpc_client_lock); list_for_each_entry(clnt, &all_clients, cl_clients) { spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { rpc_show_header(); header++; } rpc_show_task(clnt, task); } spin_unlock(&clnt->cl_lock); } spin_unlock(&rpc_client_lock); } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3471_2
crossvul-cpp_data_bad_1472_0
/* * CUSE: Character device in Userspace * * Copyright (C) 2008-2009 SUSE Linux Products GmbH * Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * CUSE enables character devices to be implemented from userland much * like FUSE allows filesystems. On initialization /dev/cuse is * created. By opening the file and replying to the CUSE_INIT request * userland CUSE server can create a character device. After that the * operation is very similar to FUSE. * * A CUSE instance involves the following objects. * * cuse_conn : contains fuse_conn and serves as bonding structure * channel : file handle connected to the userland CUSE server * cdev : the implemented character device * dev : generic device for cdev * * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with * devices, it's called 'channel' to reduce confusion. * * channel determines when the character device dies. When channel is * closed, everything begins to destruct. The cuse_conn is taken off * the lookup table preventing further access from cdev, cdev and * generic device are removed and the base reference of cuse_conn is * put. * * On each open, the matching cuse_conn is looked up and if found an * additional reference is taken which is released when the file is * closed. */ #include <linux/fuse.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kdev_t.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/magic.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/uio.h> #include "fuse_i.h" #define CUSE_CONNTBL_LEN 64 struct cuse_conn { struct list_head list; /* linked on cuse_conntbl */ struct fuse_conn fc; /* fuse connection */ struct cdev *cdev; /* associated character device */ struct device *dev; /* device representing @cdev */ /* init parameters, set once during initialization */ bool unrestricted_ioctl; }; static DEFINE_MUTEX(cuse_lock); /* protects registration */ static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; static struct class *cuse_class; static struct cuse_conn *fc_to_cc(struct fuse_conn *fc) { return container_of(fc, struct cuse_conn, fc); } static struct list_head *cuse_conntbl_head(dev_t devt) { return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN]; } /************************************************************************** * CUSE frontend operations * * These are file operations for the character device. * * On open, CUSE opens a file from the FUSE mnt and stores it to * private_data of the open file. All other ops call FUSE ops on the * FUSE file. */ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to) { struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE); } static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; /* * No locking or generic_write_checks(), the server is * responsible for locking and sanity checks. 
*/ return fuse_direct_io(&io, from, &pos, FUSE_DIO_WRITE | FUSE_DIO_CUSE); } static int cuse_open(struct inode *inode, struct file *file) { dev_t devt = inode->i_cdev->dev; struct cuse_conn *cc = NULL, *pos; int rc; /* look up and get the connection */ mutex_lock(&cuse_lock); list_for_each_entry(pos, cuse_conntbl_head(devt), list) if (pos->dev->devt == devt) { fuse_conn_get(&pos->fc); cc = pos; break; } mutex_unlock(&cuse_lock); /* dead? */ if (!cc) return -ENODEV; /* * Generic permission check is already done against the chrdev * file, proceed to open. */ rc = fuse_do_open(&cc->fc, 0, file, 0); if (rc) fuse_conn_put(&cc->fc); return rc; } static int cuse_release(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; fuse_sync_release(ff, file->f_flags); fuse_conn_put(fc); return 0; } static long cuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = 0; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = FUSE_IOCTL_COMPAT; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static const struct file_operations cuse_frontend_fops = { .owner = THIS_MODULE, .read_iter = cuse_read_iter, .write_iter = cuse_write_iter, .open = cuse_open, .release = cuse_release, .unlocked_ioctl = cuse_file_ioctl, .compat_ioctl = cuse_file_compat_ioctl, .poll = fuse_file_poll, .llseek = noop_llseek, }; /************************************************************************** * CUSE channel initialization and destruction */ struct cuse_devinfo { const char *name; }; /** * cuse_parse_one - parse one key=value pair * @pp: i/o parameter for the current position * @end: points to one past the end of the packed string * @keyp: out parameter for key * @valp: out parameter for value * * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends * at @end - 1. This function parses one pair and set *@keyp to the * start of the key and *@valp to the start of the value. Note that * the original string is modified such that the key string is * terminated with '\0'. *@pp is updated to point to the next string. * * RETURNS: * 1 on successful parse, 0 on EOF, -errno on failure. */ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) { char *p = *pp; char *key, *val; while (p < end && *p == '\0') p++; if (p == end) return 0; if (end[-1] != '\0') { printk(KERN_ERR "CUSE: info not properly terminated\n"); return -EINVAL; } key = val = p; p += strlen(p); if (valp) { strsep(&val, "="); if (!val) val = key + strlen(key); key = strstrip(key); val = strstrip(val); } else key = strstrip(key); if (!strlen(key)) { printk(KERN_ERR "CUSE: zero length info key specified\n"); return -EINVAL; } *pp = p; *keyp = key; if (valp) *valp = val; return 1; } /** * cuse_parse_dev_info - parse device info * @p: device info string * @len: length of device info string * @devinfo: out parameter for parsed device info * * Parse @p to extract device info and store it into @devinfo. String * pointed to by @p is modified by parsing and @devinfo points into * them, so @p shouldn't be freed while @devinfo is in use. 
* * RETURNS: * 0 on success, -errno on failure. */ static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; char *uninitialized_var(key), *uninitialized_var(val); int rc; while (true) { rc = cuse_parse_one(&p, end, &key, &val); if (rc < 0) return rc; if (!rc) break; if (strcmp(key, "DEVNAME") == 0) devinfo->name = val; else printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n", key); } if (!devinfo->name || !strlen(devinfo->name)) { printk(KERN_ERR "CUSE: DEVNAME unspecified\n"); return -EINVAL; } return 0; } static void cuse_gendev_release(struct device *dev) { kfree(dev); } /** * cuse_process_init_reply - finish initializing CUSE channel * * This function creates the character device and sets up all the * required data structures for it. Please read the comment at the * top of this file for high level overview. */ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct cuse_conn *cc = fc_to_cc(fc), *pos; struct cuse_init_out *arg = req->out.args[0].value; struct page *page = req->pages[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; dev_t devt; int rc, i; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { goto err; } fc->minor = arg->minor; fc->max_read = max_t(unsigned, arg->max_read, 4096); fc->max_write = max_t(unsigned, arg->max_write, 4096); /* parse init reply */ cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, &devinfo); if (rc) goto err; /* determine and reserve devt */ devt = MKDEV(arg->dev_major, arg->dev_minor); if (!MAJOR(devt)) rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name); else rc = register_chrdev_region(devt, 1, devinfo.name); if (rc) { printk(KERN_ERR "CUSE: failed to register chrdev region\n"); goto err; } /* devt determined, create device */ rc = -ENOMEM; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto err_region; device_initialize(dev); dev_set_uevent_suppress(dev, 1); dev->class = cuse_class; dev->devt = devt; dev->release = cuse_gendev_release; dev_set_drvdata(dev, cc); dev_set_name(dev, "%s", devinfo.name); mutex_lock(&cuse_lock); /* make sure the device-name is unique */ for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { list_for_each_entry(pos, &cuse_conntbl[i], list) if (!strcmp(dev_name(pos->dev), dev_name(dev))) goto err_unlock; } rc = device_add(dev); if (rc) goto err_unlock; /* register cdev */ rc = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto err_unlock; cdev->owner = THIS_MODULE; cdev->ops = &cuse_frontend_fops; rc = cdev_add(cdev, devt, 1); if (rc) goto err_cdev; cc->dev = dev; cc->cdev = cdev; /* make the device available */ list_add(&cc->list, cuse_conntbl_head(devt)); mutex_unlock(&cuse_lock); /* announce device availability */ dev_set_uevent_suppress(dev, 0); kobject_uevent(&dev->kobj, KOBJ_ADD); out: kfree(arg); __free_page(page); return; err_cdev: cdev_del(cdev); err_unlock: mutex_unlock(&cuse_lock); put_device(dev); err_region: unregister_chrdev_region(devt, 1); err: fuse_abort_conn(fc); goto out; } static int cuse_send_init(struct cuse_conn *cc) { int rc; struct fuse_req *req; struct page *page; struct fuse_conn *fc = &cc->fc; struct cuse_init_in *arg; void *outarg; BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); req = fuse_get_req_for_background(fc, 1); if (IS_ERR(req)) { rc = PTR_ERR(req); goto err; } rc = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto err_put_req; outarg = 
kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL); if (!outarg) goto err_free_page; arg = &req->misc.cuse_init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->flags |= CUSE_UNRESTRICTED_IOCTL; req->in.h.opcode = CUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(struct cuse_init_in); req->in.args[0].value = arg; req->out.numargs = 2; req->out.args[0].size = sizeof(struct cuse_init_out); req->out.args[0].value = outarg; req->out.args[1].size = CUSE_INIT_INFO_MAX; req->out.argvar = 1; req->out.argpages = 1; req->pages[0] = page; req->page_descs[0].length = req->out.args[1].size; req->num_pages = 1; req->end = cuse_process_init_reply; fuse_request_send_background(fc, req); return 0; err_free_page: __free_page(page); err_put_req: fuse_put_request(fc, req); err: return rc; } static void cuse_fc_release(struct fuse_conn *fc) { struct cuse_conn *cc = fc_to_cc(fc); kfree_rcu(cc, fc.rcu); } /** * cuse_channel_open - open method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being opened * * Userland CUSE server can create a CUSE device by opening /dev/cuse * and replying to the initialization request kernel sends. This * function is responsible for handling CUSE device initialization. * Because the fd opened by this function is used during * initialization, this function only creates cuse_conn and sends * init. The rest is delegated to a kthread. * * RETURNS: * 0 on success, -errno on failure. */ static int cuse_channel_open(struct inode *inode, struct file *file) { struct fuse_dev *fud; struct cuse_conn *cc; int rc; /* set up cuse_conn */ cc = kzalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; fuse_conn_init(&cc->fc); fud = fuse_dev_alloc(&cc->fc); if (!fud) { kfree(cc); return -ENOMEM; } INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; cc->fc.initialized = 1; rc = cuse_send_init(cc); if (rc) { fuse_dev_free(fud); return rc; } file->private_data = fud; return 0; } /** * cuse_channel_release - release method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being closed * * Disconnect the channel, deregister CUSE device and initiate * destruction by putting the default reference. * * RETURNS: * 0 on success, -errno on failure. */ static int cuse_channel_release(struct inode *inode, struct file *file) { struct fuse_dev *fud = file->private_data; struct cuse_conn *cc = fc_to_cc(fud->fc); int rc; /* remove from the conntbl, no more access from this point on */ mutex_lock(&cuse_lock); list_del_init(&cc->list); mutex_unlock(&cuse_lock); /* remove device */ if (cc->dev) device_unregister(cc->dev); if (cc->cdev) { unregister_chrdev_region(cc->cdev->dev, 1); cdev_del(cc->cdev); } rc = fuse_dev_release(inode, file); /* puts the base reference */ return rc; } static struct file_operations cuse_channel_fops; /* initialized during init */ /************************************************************************** * Misc stuff and module initializatiion * * CUSE exports the same set of attributes to sysfs as fusectl. 
*/ static ssize_t cuse_class_waiting_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cuse_conn *cc = dev_get_drvdata(dev); return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting)); } static DEVICE_ATTR(waiting, 0400, cuse_class_waiting_show, NULL); static ssize_t cuse_class_abort_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cuse_conn *cc = dev_get_drvdata(dev); fuse_abort_conn(&cc->fc); return count; } static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store); static struct attribute *cuse_class_dev_attrs[] = { &dev_attr_waiting.attr, &dev_attr_abort.attr, NULL, }; ATTRIBUTE_GROUPS(cuse_class_dev); static struct miscdevice cuse_miscdev = { .minor = CUSE_MINOR, .name = "cuse", .fops = &cuse_channel_fops, }; MODULE_ALIAS_MISCDEV(CUSE_MINOR); MODULE_ALIAS("devname:cuse"); static int __init cuse_init(void) { int i, rc; /* init conntbl */ for (i = 0; i < CUSE_CONNTBL_LEN; i++) INIT_LIST_HEAD(&cuse_conntbl[i]); /* inherit and extend fuse_dev_operations */ cuse_channel_fops = fuse_dev_operations; cuse_channel_fops.owner = THIS_MODULE; cuse_channel_fops.open = cuse_channel_open; cuse_channel_fops.release = cuse_channel_release; cuse_class = class_create(THIS_MODULE, "cuse"); if (IS_ERR(cuse_class)) return PTR_ERR(cuse_class); cuse_class->dev_groups = cuse_class_dev_groups; rc = misc_register(&cuse_miscdev); if (rc) { class_destroy(cuse_class); return rc; } return 0; } static void __exit cuse_exit(void) { misc_deregister(&cuse_miscdev); class_destroy(cuse_class); } module_init(cuse_init); module_exit(cuse_exit); MODULE_AUTHOR("Tejun Heo <tj@kernel.org>"); MODULE_DESCRIPTION("Character device in Userspace"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-399/c/bad_1472_0
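The cuse_parse_one()/cuse_parse_devinfo() comments in the entry above describe the packed "key0=val0\0key1=val1\0" info string that a userland CUSE server returns in the second CUSE_INIT reply argument, with DEVNAME as the one mandatory key. Below is a minimal server-side sketch of packing such a string; the helper name and the fixed 4096-byte cap are assumptions made for the illustration only, not part of the CUSE interface.

/*
 * Illustrative sketch (not part of the dataset entry above): pack one
 * "key=value" pair plus its terminating NUL into the CUSE_INIT info buffer,
 * in the format cuse_parse_one()/cuse_parse_devinfo() expect to unpack.
 */
#include <stdio.h>
#include <string.h>

static size_t cuse_info_add(char *buf, size_t used, size_t size,
			    const char *key, const char *val)
{
	int n = snprintf(buf + used, size - used, "%s=%s", key, val);

	if (n < 0 || (size_t)n + 1 > size - used) {
		buf[used] = '\0';	/* pair does not fit, report old length */
		return used;
	}
	return used + (size_t)n + 1;	/* keep the '\0' as pair separator */
}

int main(void)
{
	char info[4096];		/* assumed cap, roughly one page */
	size_t len = 0;

	len = cuse_info_add(info, len, sizeof(info), "DEVNAME", "mychardev");

	/* kernel side: cuse_parse_devinfo() walks the packed pairs and
	 * rejects the reply if DEVNAME is missing or empty. */
	printf("packed %zu bytes, first pair: %s\n", len, info);
	return 0;
}

Running the sketch prints the first packed pair; on the kernel side cuse_parse_devinfo() refuses an init reply whose info block lacks a non-empty DEVNAME.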
crossvul-cpp_data_bad_3486_15
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file README.legal in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <linux/elf.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/seccomp.h> #include <linux/audit.h> #include <trace/syscall.h> #ifdef CONFIG_PPC32 #include <linux/module.h> #endif #include <linux/hw_breakpoint.h> #include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> /* * The parameter save area on the stack is used to store arguments being passed * to callee function and is located at fixed offset from stack pointer. */ #ifdef CONFIG_PPC32 #define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */ #else /* CONFIG_PPC32 */ #define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */ #endif struct pt_regs_offset { const char *name; int offset; }; #define STR(s) #s /* convert to string */ #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define GPR_OFFSET_NAME(num) \ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { GPR_OFFSET_NAME(0), GPR_OFFSET_NAME(1), GPR_OFFSET_NAME(2), GPR_OFFSET_NAME(3), GPR_OFFSET_NAME(4), GPR_OFFSET_NAME(5), GPR_OFFSET_NAME(6), GPR_OFFSET_NAME(7), GPR_OFFSET_NAME(8), GPR_OFFSET_NAME(9), GPR_OFFSET_NAME(10), GPR_OFFSET_NAME(11), GPR_OFFSET_NAME(12), GPR_OFFSET_NAME(13), GPR_OFFSET_NAME(14), GPR_OFFSET_NAME(15), GPR_OFFSET_NAME(16), GPR_OFFSET_NAME(17), GPR_OFFSET_NAME(18), GPR_OFFSET_NAME(19), GPR_OFFSET_NAME(20), GPR_OFFSET_NAME(21), GPR_OFFSET_NAME(22), GPR_OFFSET_NAME(23), GPR_OFFSET_NAME(24), GPR_OFFSET_NAME(25), GPR_OFFSET_NAME(26), GPR_OFFSET_NAME(27), GPR_OFFSET_NAME(28), GPR_OFFSET_NAME(29), GPR_OFFSET_NAME(30), GPR_OFFSET_NAME(31), REG_OFFSET_NAME(nip), REG_OFFSET_NAME(msr), REG_OFFSET_NAME(ctr), REG_OFFSET_NAME(link), REG_OFFSET_NAME(xer), REG_OFFSET_NAME(ccr), #ifdef CONFIG_PPC64 REG_OFFSET_NAME(softe), #else REG_OFFSET_NAME(mq), #endif REG_OFFSET_NAME(trap), REG_OFFSET_NAME(dar), REG_OFFSET_NAME(dsisr), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. 
* * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Set of msr bits that gdb can change on behalf of a process. */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_DEBUGCHANGE 0 #else #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) #endif /* * Max register writeable via put_reg */ #ifdef CONFIG_PPC32 #define PT_MAX_PUT_REG PT_MQ #else #define PT_MAX_PUT_REG PT_CCR #endif static unsigned long get_user_msr(struct task_struct *task) { return task->thread.regs->msr | task->thread.fpexc_mode; } static int set_user_msr(struct task_struct *task, unsigned long msr) { task->thread.regs->msr &= ~MSR_DEBUGCHANGE; task->thread.regs->msr |= msr & MSR_DEBUGCHANGE; return 0; } /* * We prevent mucking around with the reserved area of trap * which are used internally by the kernel. */ static int set_user_trap(struct task_struct *task, unsigned long trap) { task->thread.regs->trap = trap & 0xfff0; return 0; } /* * Get contents of register REGNO in task TASK. */ unsigned long ptrace_get_reg(struct task_struct *task, int regno) { if (task->thread.regs == NULL) return -EIO; if (regno == PT_MSR) return get_user_msr(task); if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) return ((unsigned long *)task->thread.regs)[regno]; return -EIO; } /* * Write contents of register REGNO in task TASK. */ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data) { if (task->thread.regs == NULL) return -EIO; if (regno == PT_MSR) return set_user_msr(task, data); if (regno == PT_TRAP) return set_user_trap(task, data); if (regno <= PT_MAX_PUT_REG) { ((unsigned long *)task->thread.regs)[regno] = data; return 0; } return -EIO; } static int gpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int i, ret; if (target->thread.regs == NULL) return -EIO; if (!FULL_REGS(target->thread.regs)) { /* We have a partial register set. 
Fill 14-31 with bogus values */ for (i = 14; i < 32; i++) target->thread.regs->gpr[i] = NV_REG_POISON; } ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, target->thread.regs, 0, offsetof(struct pt_regs, msr)); if (!ret) { unsigned long msr = get_user_msr(target); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr, offsetof(struct pt_regs, msr), offsetof(struct pt_regs, msr) + sizeof(msr)); } BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != offsetof(struct pt_regs, msr) + sizeof(long)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.regs->orig_gpr3, offsetof(struct pt_regs, orig_gpr3), sizeof(struct pt_regs)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, sizeof(struct pt_regs), -1); return ret; } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long reg; int ret; if (target->thread.regs == NULL) return -EIO; CHECK_FULL_REGS(target->thread.regs); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, target->thread.regs, 0, PT_MSR * sizeof(reg)); if (!ret && count > 0) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg, PT_MSR * sizeof(reg), (PT_MSR + 1) * sizeof(reg)); if (!ret) ret = set_user_msr(target, reg); } BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != offsetof(struct pt_regs, msr) + sizeof(long)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.regs->orig_gpr3, PT_ORIG_R3 * sizeof(reg), (PT_MAX_PUT_REG + 1) * sizeof(reg)); if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret) ret = user_regset_copyin_ignore( &pos, &count, &kbuf, &ubuf, (PT_MAX_PUT_REG + 1) * sizeof(reg), PT_TRAP * sizeof(reg)); if (!ret && count > 0) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg, PT_TRAP * sizeof(reg), (PT_TRAP + 1) * sizeof(reg)); if (!ret) ret = set_user_trap(target, reg); } if (!ret) ret = user_regset_copyin_ignore( &pos, &count, &kbuf, &ubuf, (PT_TRAP + 1) * sizeof(reg), -1); return ret; } static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { #ifdef CONFIG_VSX double buf[33]; int i; #endif flush_fp_to_thread(target); #ifdef CONFIG_VSX /* copy to local buffer then write that out */ for (i = 0; i < 32 ; i++) buf[i] = target->thread.TS_FPR(i); memcpy(&buf[32], &target->thread.fpscr, sizeof(double)); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); #else BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != offsetof(struct thread_struct, TS_FPR(32))); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpr, 0, -1); #endif } static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { #ifdef CONFIG_VSX double buf[33]; int i; #endif flush_fp_to_thread(target); #ifdef CONFIG_VSX /* copy to local buffer then write that out */ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); if (i) return i; for (i = 0; i < 32 ; i++) target->thread.TS_FPR(i) = buf[i]; memcpy(&target->thread.fpscr, &buf[32], sizeof(double)); return 0; #else BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != offsetof(struct thread_struct, TS_FPR(32))); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpr, 0, -1); #endif } #ifdef CONFIG_ALTIVEC /* * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. 
* The transfer totals 34 quadword. Quadwords 0-31 contain the * corresponding vector registers. Quadword 32 contains the vscr as the * last word (offset 12) within that quadword. Quadword 33 contains the * vrsave as the first word (offset 0) within the quadword. * * This definition of the VMX state is compatible with the current PPC32 * ptrace interface. This allows signal handling and ptrace to use the * same structures. This also simplifies the implementation of a bi-arch * (combined (32- and 64-bit) gdb. */ static int vr_active(struct task_struct *target, const struct user_regset *regset) { flush_altivec_to_thread(target); return target->thread.used_vr ? regset->n : 0; } static int vr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; flush_altivec_to_thread(target); BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != offsetof(struct thread_struct, vr[32])); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.vr, 0, 33 * sizeof(vector128)); if (!ret) { /* * Copy out only the low-order word of vrsave. */ union { elf_vrreg_t reg; u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); vrsave.word = target->thread.vrsave; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); } return ret; } static int vr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; flush_altivec_to_thread(target); BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != offsetof(struct thread_struct, vr[32])); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.vr, 0, 33 * sizeof(vector128)); if (!ret && count > 0) { /* * We use only the first word of vrsave. */ union { elf_vrreg_t reg; u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); vrsave.word = target->thread.vrsave; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); if (!ret) target->thread.vrsave = vrsave.word; } return ret; } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX /* * Currently to set and and get all the vsx state, you need to call * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. */ static int vsr_active(struct task_struct *target, const struct user_regset *regset) { flush_vsx_to_thread(target); return target->thread.used_vsr ? 
regset->n : 0; } static int vsr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { double buf[32]; int ret, i; flush_vsx_to_thread(target); for (i = 0; i < 32 ; i++) buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET]; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); return ret; } static int vsr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { double buf[32]; int ret,i; flush_vsx_to_thread(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); for (i = 0; i < 32 ; i++) target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return ret; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* * For get_evrregs/set_evrregs functions 'data' has the following layout: * * struct { * u32 evr[32]; * u64 acc; * u32 spefscr; * } */ static int evr_active(struct task_struct *target, const struct user_regset *regset) { flush_spe_to_thread(target); return target->thread.used_spe ? regset->n : 0; } static int evr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; flush_spe_to_thread(target); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.evr, 0, sizeof(target->thread.evr)); BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) != offsetof(struct thread_struct, spefscr)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.acc, sizeof(target->thread.evr), -1); return ret; } static int evr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; flush_spe_to_thread(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.evr, 0, sizeof(target->thread.evr)); BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) != offsetof(struct thread_struct, spefscr)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.acc, sizeof(target->thread.evr), -1); return ret; } #endif /* CONFIG_SPE */ /* * These are our native regset flavors. 
*/ enum powerpc_regset { REGSET_GPR, REGSET_FPR, #ifdef CONFIG_ALTIVEC REGSET_VMX, #endif #ifdef CONFIG_VSX REGSET_VSX, #endif #ifdef CONFIG_SPE REGSET_SPE, #endif }; static const struct user_regset native_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .get = fpr_get, .set = fpr_set }, #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, .size = sizeof(vector128), .align = sizeof(vector128), .active = vr_active, .get = vr_get, .set = vr_set }, #endif #ifdef CONFIG_VSX [REGSET_VSX] = { .core_note_type = NT_PPC_VSX, .n = 32, .size = sizeof(double), .align = sizeof(double), .active = vsr_active, .get = vsr_get, .set = vsr_set }, #endif #ifdef CONFIG_SPE [REGSET_SPE] = { .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, #endif }; static const struct user_regset_view user_ppc_native_view = { .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) }; #ifdef CONFIG_PPC64 #include <linux/compat.h> static int gpr32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const unsigned long *regs = &target->thread.regs->gpr[0]; compat_ulong_t *k = kbuf; compat_ulong_t __user *u = ubuf; compat_ulong_t reg; int i; if (target->thread.regs == NULL) return -EIO; if (!FULL_REGS(target->thread.regs)) { /* We have a partial register set. Fill 14-31 with bogus values */ for (i = 14; i < 32; i++) target->thread.regs->gpr[i] = NV_REG_POISON; } pos /= sizeof(reg); count /= sizeof(reg); if (kbuf) for (; count > 0 && pos < PT_MSR; --count) *k++ = regs[pos++]; else for (; count > 0 && pos < PT_MSR; --count) if (__put_user((compat_ulong_t) regs[pos++], u++)) return -EFAULT; if (count > 0 && pos == PT_MSR) { reg = get_user_msr(target); if (kbuf) *k++ = reg; else if (__put_user(reg, u++)) return -EFAULT; ++pos; --count; } if (kbuf) for (; count > 0 && pos < PT_REGS_COUNT; --count) *k++ = regs[pos++]; else for (; count > 0 && pos < PT_REGS_COUNT; --count) if (__put_user((compat_ulong_t) regs[pos++], u++)) return -EFAULT; kbuf = k; ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, PT_REGS_COUNT * sizeof(reg), -1); } static int gpr32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long *regs = &target->thread.regs->gpr[0]; const compat_ulong_t *k = kbuf; const compat_ulong_t __user *u = ubuf; compat_ulong_t reg; if (target->thread.regs == NULL) return -EIO; CHECK_FULL_REGS(target->thread.regs); pos /= sizeof(reg); count /= sizeof(reg); if (kbuf) for (; count > 0 && pos < PT_MSR; --count) regs[pos++] = *k++; else for (; count > 0 && pos < PT_MSR; --count) { if (__get_user(reg, u++)) return -EFAULT; regs[pos++] = reg; } if (count > 0 && pos == PT_MSR) { if (kbuf) reg = *k++; else if (__get_user(reg, u++)) return -EFAULT; set_user_msr(target, reg); ++pos; --count; } if (kbuf) { for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) regs[pos++] = *k++; for (; count > 0 && pos < PT_TRAP; --count, ++pos) ++k; } else { for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) { if (__get_user(reg, u++)) return -EFAULT; 
regs[pos++] = reg; } for (; count > 0 && pos < PT_TRAP; --count, ++pos) if (__get_user(reg, u++)) return -EFAULT; } if (count > 0 && pos == PT_TRAP) { if (kbuf) reg = *k++; else if (__get_user(reg, u++)) return -EFAULT; set_user_trap(target, reg); ++pos; --count; } kbuf = k; ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, (PT_TRAP + 1) * sizeof(reg), -1); } /* * These are the regset flavors matching the CONFIG_PPC32 native set. */ static const struct user_regset compat_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), .get = gpr32_get, .set = gpr32_set }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .get = fpr_get, .set = fpr_set }, #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, .size = sizeof(vector128), .align = sizeof(vector128), .active = vr_active, .get = vr_get, .set = vr_set }, #endif #ifdef CONFIG_SPE [REGSET_SPE] = { .core_note_type = NT_PPC_SPE, .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, #endif }; static const struct user_regset_view user_ppc_compat_view = { .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI, .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets) }; #endif /* CONFIG_PPC64 */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_PPC64 if (test_tsk_thread_flag(task, TIF_32BIT)) return &user_ppc_compat_view; #endif return &user_ppc_native_view; } void user_enable_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS task->thread.dbcr0 &= ~DBCR0_BT; task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_BE; regs->msr |= MSR_SE; #endif } set_tsk_thread_flag(task, TIF_SINGLESTEP); } void user_enable_block_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS task->thread.dbcr0 &= ~DBCR0_IC; task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_SE; regs->msr |= MSR_BE; #endif } set_tsk_thread_flag(task, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * The logic to disable single stepping should be as * simple as turning off the Instruction Complete flag. * And, after doing so, if all debug flags are off, turn * off DBCR0(IDM) and MSR(DE) .... Torez */ task->thread.dbcr0 &= ~DBCR0_IC; /* * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. */ if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, task->thread.dbcr1)) { /* * All debug events were off..... */ task->thread.dbcr0 &= ~DBCR0_IDM; regs->msr &= ~MSR_DE; } #else regs->msr &= ~(MSR_SE | MSR_BE); #endif } clear_tsk_thread_flag(task, TIF_SINGLESTEP); } #ifdef CONFIG_HAVE_HW_BREAKPOINT void ptrace_triggered(struct perf_event *bp, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; /* * Disable the breakpoint request here since ptrace has defined a * one-shot behaviour for breakpoint exceptions in PPC64. * The SIGTRAP signal is generated automatically for us in do_dabr(). 
* We don't have to do anything about that here */ attr = bp->attr; attr.disabled = true; modify_user_hw_breakpoint(bp, &attr); } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data) { #ifdef CONFIG_HAVE_HW_BREAKPOINT int ret; struct thread_struct *thread = &(task->thread); struct perf_event *bp; struct perf_event_attr attr; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* For ppc64 we support one DABR and no IABR's at the moment (ppc64). * For embedded processors we support one DAC and no IAC's at the * moment. */ if (addr > 0) return -EINVAL; /* The bottom 3 bits in dabr are flags */ if ((data & ~0x7UL) >= TASK_SIZE) return -EIO; #ifndef CONFIG_PPC_ADV_DEBUG_REGS /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. * It was assumed, on previous implementations, that 3 bits were * passed together with the data address, fitting the design of the * DABR register, as follows: * * bit 0: Read flag * bit 1: Write flag * bit 2: Breakpoint translation * * Thus, we use them here as so. */ /* Ensure breakpoint translation bit is set */ if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT if (ptrace_get_breakpoints(task) < 0) return -ESRCH; bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } ptrace_put_breakpoints(task); return 0; } if (bp) { attr = bp->attr; attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); if (ret) { ptrace_put_breakpoints(task); return ret; } thread->ptrace_bps[0] = bp; ptrace_put_breakpoints(task); thread->dabr = data; return 0; } /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; ptrace_put_breakpoints(task); return PTR_ERR(bp); } ptrace_put_breakpoints(task); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ task->thread.dabr = data; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ /* As described above, it was assumed 3 bits were passed with the data * address, but we will assume only the mode bits will be passed * as to not cause alignment restrictions for DAC-based processors. */ /* DAC's hold the whole address without any mode flags */ task->thread.dac1 = data & ~0x3UL; if (task->thread.dac1 == 0) { dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, task->thread.dbcr1)) { task->thread.regs->msr &= ~MSR_DE; task->thread.dbcr0 &= ~DBCR0_IDM; } return 0; } /* Read or Write bits must be set */ if (!(data & 0x3UL)) return -EINVAL; /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */ task->thread.dbcr0 |= DBCR0_IDM; /* Check for write and read flags and set DBCR0 accordingly */ dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); if (data & 0x1UL) dbcr_dac(task) |= DBCR_DAC1R; if (data & 0x2UL) dbcr_dac(task) |= DBCR_DAC1W; task->thread.regs->msr |= MSR_DE; #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ return 0; } /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. 
*/ void ptrace_disable(struct task_struct *child) { /* make sure the single step bit is not set. */ user_disable_single_step(child); } #ifdef CONFIG_PPC_ADV_DEBUG_REGS static long set_intruction_bp(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int slot; int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); if (dbcr_iac_range(child) & DBCR_IAC12MODE) slot2_in_use = 1; if (dbcr_iac_range(child) & DBCR_IAC34MODE) slot4_in_use = 1; if (bp_info->addr >= TASK_SIZE) return -EIO; if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { /* Make sure range is valid. */ if (bp_info->addr2 >= TASK_SIZE) return -EIO; /* We need a pair of IAC regsisters */ if ((!slot1_in_use) && (!slot2_in_use)) { slot = 1; child->thread.iac1 = bp_info->addr; child->thread.iac2 = bp_info->addr2; child->thread.dbcr0 |= DBCR0_IAC1; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC12X; else dbcr_iac_range(child) |= DBCR_IAC12I; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if ((!slot3_in_use) && (!slot4_in_use)) { slot = 3; child->thread.iac3 = bp_info->addr; child->thread.iac4 = bp_info->addr2; child->thread.dbcr0 |= DBCR0_IAC3; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC34X; else dbcr_iac_range(child) |= DBCR_IAC34I; #endif } else return -ENOSPC; } else { /* We only need one. If possible leave a pair free in * case a range is needed later */ if (!slot1_in_use) { /* * Don't use iac1 if iac1-iac2 are free and either * iac3 or iac4 (but not both) are free */ if (slot2_in_use || (slot3_in_use == slot4_in_use)) { slot = 1; child->thread.iac1 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC1; goto out; } } if (!slot2_in_use) { slot = 2; child->thread.iac2 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC2; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if (!slot3_in_use) { slot = 3; child->thread.iac3 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC3; } else if (!slot4_in_use) { slot = 4; child->thread.iac4 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC4; #endif } else return -ENOSPC; } out: child->thread.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot; } static int del_instruction_bp(struct task_struct *child, int slot) { switch (slot) { case 1: if ((child->thread.dbcr0 & DBCR0_IAC1) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) { /* address range - clear slots 1 & 2 */ child->thread.iac2 = 0; dbcr_iac_range(child) &= ~DBCR_IAC12MODE; } child->thread.iac1 = 0; child->thread.dbcr0 &= ~DBCR0_IAC1; break; case 2: if ((child->thread.dbcr0 & DBCR0_IAC2) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) /* used in a range */ return -EINVAL; child->thread.iac2 = 0; child->thread.dbcr0 &= ~DBCR0_IAC2; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case 3: if ((child->thread.dbcr0 & DBCR0_IAC3) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) { /* address range - clear slots 3 & 4 */ child->thread.iac4 = 0; dbcr_iac_range(child) &= ~DBCR_IAC34MODE; } child->thread.iac3 = 0; child->thread.dbcr0 &= ~DBCR0_IAC3; break; case 4: if ((child->thread.dbcr0 & DBCR0_IAC4) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) /* Used in a range */ return -EINVAL; child->thread.iac4 = 0; child->thread.dbcr0 &= ~DBCR0_IAC4; break; #endif default: return -EINVAL; } return 
0; } static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int byte_enable = (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT) & 0xf; int condition_mode = bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE; int slot; if (byte_enable && (condition_mode == 0)) return -EINVAL; if (bp_info->addr >= TASK_SIZE) return -EIO; if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) { slot = 1; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dbcr_dac(child) |= DBCR_DAC1R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC1W; child->thread.dac1 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { child->thread.dvc1 = (unsigned long)bp_info->condition_value; child->thread.dbcr2 |= ((byte_enable << DBCR2_DVC1BE_SHIFT) | (condition_mode << DBCR2_DVC1M_SHIFT)); } #endif #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { /* Both dac1 and dac2 are part of a range */ return -ENOSPC; #endif } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) { slot = 2; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dbcr_dac(child) |= DBCR_DAC2R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC2W; child->thread.dac2 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { child->thread.dvc2 = (unsigned long)bp_info->condition_value; child->thread.dbcr2 |= ((byte_enable << DBCR2_DVC2BE_SHIFT) | (condition_mode << DBCR2_DVC2M_SHIFT)); } #endif } else return -ENOSPC; child->thread.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot + 4; } static int del_dac(struct task_struct *child, int slot) { if (slot == 1) { if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) return -ENOENT; child->thread.dac1 = 0; dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE if (child->thread.dbcr2 & DBCR2_DAC12MODE) { child->thread.dac2 = 0; child->thread.dbcr2 &= ~DBCR2_DAC12MODE; } child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 child->thread.dvc1 = 0; #endif } else if (slot == 2) { if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) return -ENOENT; #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE if (child->thread.dbcr2 & DBCR2_DAC12MODE) /* Part of a range */ return -EINVAL; child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 child->thread.dvc2 = 0; #endif child->thread.dac2 = 0; dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); } else return -EINVAL; return 0; } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE static int set_dac_range(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK; /* We don't allow range watchpoints to be used with DVC */ if (bp_info->condition_mode) return -EINVAL; /* * Best effort to verify the address range. The user/supervisor bits * prevent trapping in kernel space, but let's fail on an obvious bad * range. The simple test on the mask is not fool-proof, and any * exclusive range will spill over into kernel space. */ if (bp_info->addr >= TASK_SIZE) return -EIO; if (mode == PPC_BREAKPOINT_MODE_MASK) { /* * dac2 is a bitmask. 
Don't allow a mask that makes a * kernel space address from a valid dac1 value */ if (~((unsigned long)bp_info->addr2) >= TASK_SIZE) return -EIO; } else { /* * For range breakpoints, addr2 must also be a valid address */ if (bp_info->addr2 >= TASK_SIZE) return -EIO; } if (child->thread.dbcr0 & (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) return -ENOSPC; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); child->thread.dac1 = bp_info->addr; child->thread.dac2 = bp_info->addr2; if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) child->thread.dbcr2 |= DBCR2_DAC12M; else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) child->thread.dbcr2 |= DBCR2_DAC12MX; else /* PPC_BREAKPOINT_MODE_MASK */ child->thread.dbcr2 |= DBCR2_DAC12MM; child->thread.regs->msr |= MSR_DE; return 5; } #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */ static long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { #ifndef CONFIG_PPC_ADV_DEBUG_REGS unsigned long dabr; #endif if (bp_info->version != 1) return -ENOTSUPP; #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * Check for invalid flags and combinations */ if ((bp_info->trigger_type == 0) || (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE | PPC_BREAKPOINT_TRIGGER_RW)) || (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) || (bp_info->condition_mode & ~(PPC_BREAKPOINT_CONDITION_MODE | PPC_BREAKPOINT_CONDITION_BE_ALL))) return -EINVAL; #if CONFIG_PPC_ADV_DEBUG_DVCS == 0 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; #endif if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) { if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) return -EINVAL; return set_intruction_bp(child, bp_info); } if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) return set_dac(child, bp_info); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE return set_dac_range(child, bp_info); #else return -EINVAL; #endif #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */ /* * We only support one data breakpoint */ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; if (child->thread.dabr) return -ENOSPC; if ((unsigned long)bp_info->addr >= TASK_SIZE) return -EIO; dabr = (unsigned long)bp_info->addr & ~7UL; dabr |= DABR_TRANSLATION; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dabr |= DABR_DATA_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dabr |= DABR_DATA_WRITE; child->thread.dabr = dabr; return 1; #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ } static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS int rc; if (data <= 4) rc = del_instruction_bp(child, (int)data); else rc = del_dac(child, (int)data - 4); if (!rc) { if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, child->thread.dbcr1)) { child->thread.dbcr0 &= ~DBCR0_IDM; child->thread.regs->msr &= ~MSR_DE; } } return rc; #else if (data != 1) return -EINVAL; if (child->thread.dabr == 0) return -ENOENT; child->thread.dabr = 0; return 0; #endif } /* * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, * we mark them as obsolete now, they will be removed in a future version */ static long 
arch_ptrace_old(struct task_struct *child, long request, unsigned long addr, unsigned long data) { void __user *datavp = (void __user *) data; switch (request) { case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_GPR, 0, 32 * sizeof(long), datavp); case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_GPR, 0, 32 * sizeof(long), datavp); case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_FPR, 0, 32 * sizeof(double), datavp); case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_FPR, 0, 32 * sizeof(double), datavp); } return -EPERM; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret = -EPERM; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; switch (request) { /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long index, tmp; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = ptrace_get_reg(child, (int) index); } else { flush_fp_to_thread(child); tmp = ((unsigned long *)child->thread.fpr) [TS_FPRWIDTH * (index - PT_FPR0)]; } ret = put_user(tmp, datalp); break; } /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { flush_fp_to_thread(child); ((unsigned long *)child->thread.fpr) [TS_FPRWIDTH * (index - PT_FPR0)] = data; ret = 0; } break; } case PPC_PTRACE_GETHWDBGINFO: { struct ppc_debug_info dbginfo; dbginfo.version = 1; #ifdef CONFIG_PPC_ADV_DEBUG_REGS dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS; dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS; dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS; dbginfo.data_bp_alignment = 4; dbginfo.sizeof_condition = 4; dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE | PPC_DEBUG_FEATURE_INSN_BP_MASK; #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE | PPC_DEBUG_FEATURE_DATA_BP_MASK; #endif #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ dbginfo.num_instruction_bps = 0; dbginfo.num_data_bps = 1; dbginfo.num_condition_regs = 0; #ifdef CONFIG_PPC64 dbginfo.data_bp_alignment = 8; #else dbginfo.data_bp_alignment = 4; #endif dbginfo.sizeof_condition = 0; dbginfo.features = 0; #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ if (!access_ok(VERIFY_WRITE, datavp, sizeof(struct ppc_debug_info))) return -EFAULT; ret = __copy_to_user(datavp, &dbginfo, sizeof(struct ppc_debug_info)) ? -EFAULT : 0; break; } case PPC_PTRACE_SETHWDEBUG: { struct ppc_hw_breakpoint bp_info; if (!access_ok(VERIFY_READ, datavp, sizeof(struct ppc_hw_breakpoint))) return -EFAULT; ret = __copy_from_user(&bp_info, datavp, sizeof(struct ppc_hw_breakpoint)) ? 
-EFAULT : 0; if (!ret) ret = ppc_set_hwdebug(child, &bp_info); break; } case PPC_PTRACE_DELHWDEBUG: { ret = ppc_del_hwdebug(child, addr, data); break; } case PTRACE_GET_DEBUGREG: { ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, datalp); #else ret = put_user(child->thread.dabr, datalp); #endif break; } case PTRACE_SET_DEBUGREG: ret = ptrace_set_debugreg(child, addr, data); break; #ifdef CONFIG_PPC64 case PTRACE_GETREGS64: #endif case PTRACE_GETREGS: /* Get all pt_regs from the child. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_GPR, 0, sizeof(struct pt_regs), datavp); #ifdef CONFIG_PPC64 case PTRACE_SETREGS64: #endif case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_GPR, 0, sizeof(struct pt_regs), datavp); case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_FPR, 0, sizeof(elf_fpregset_t), datavp); case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_FPR, 0, sizeof(elf_fpregset_t), datavp); #ifdef CONFIG_ALTIVEC case PTRACE_GETVRREGS: return copy_regset_to_user(child, &user_ppc_native_view, REGSET_VMX, 0, (33 * sizeof(vector128) + sizeof(u32)), datavp); case PTRACE_SETVRREGS: return copy_regset_from_user(child, &user_ppc_native_view, REGSET_VMX, 0, (33 * sizeof(vector128) + sizeof(u32)), datavp); #endif #ifdef CONFIG_VSX case PTRACE_GETVSRREGS: return copy_regset_to_user(child, &user_ppc_native_view, REGSET_VSX, 0, 32 * sizeof(double), datavp); case PTRACE_SETVSRREGS: return copy_regset_from_user(child, &user_ppc_native_view, REGSET_VSX, 0, 32 * sizeof(double), datavp); #endif #ifdef CONFIG_SPE case PTRACE_GETEVRREGS: /* Get the child spe register state. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_SPE, 0, 35 * sizeof(u32), datavp); case PTRACE_SETEVRREGS: /* Set the child spe register state. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_SPE, 0, 35 * sizeof(u32), datavp); #endif /* Old reverse args ptrace callss */ case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ case PPC_PTRACE_SETFPREGS: /* Get FPRs 0 - 31. */ ret = arch_ptrace_old(child, request, addr, data); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } /* * We must return the syscall number to actually look up in the table. * This can be -1L to skip running any syscall at all. */ long do_syscall_trace_enter(struct pt_regs *regs) { long ret = 0; secure_computing(regs->gpr[0]); if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. * We'll return a bogus call number to get an ENOSYS * error, but leave the original number in regs->gpr[0]. 
*/ ret = -1L; if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->gpr[0]); if (unlikely(current->audit_context)) { #ifdef CONFIG_PPC64 if (!is_32bit_task()) audit_syscall_entry(AUDIT_ARCH_PPC64, regs->gpr[0], regs->gpr[3], regs->gpr[4], regs->gpr[5], regs->gpr[6]); else #endif audit_syscall_entry(AUDIT_ARCH_PPC, regs->gpr[0], regs->gpr[3] & 0xffffffff, regs->gpr[4] & 0xffffffff, regs->gpr[5] & 0xffffffff, regs->gpr[6] & 0xffffffff); } return ret ?: regs->gpr[0]; } void do_syscall_trace_leave(struct pt_regs *regs) { int step; if (unlikely(current->audit_context)) audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, regs->result); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_exit(regs, regs->result); step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_15
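ptrace_set_debugreg() in the entry above spells out how a tracer encodes a data watchpoint for PTRACE_SET_DEBUGREG on DABR-based processors: the low three bits of the data word carry the read, write and translation flags, and the rest is the watch address with those bits masked off. A minimal tracer-side sketch of that encoding follows; the EX_* constants are local stand-ins that mirror the bit layout described in the comment, not values taken from a kernel header, and the watched address is made up.

/*
 * Illustrative sketch (not part of the dataset entry above): compose the
 * DABR word a tracer would pass as 'data' with PTRACE_SET_DEBUGREG.
 */
#include <stdio.h>

#define EX_DABR_DATA_READ	0x1UL	/* bit 0: break on loads  */
#define EX_DABR_DATA_WRITE	0x2UL	/* bit 1: break on stores */
#define EX_DABR_TRANSLATION	0x4UL	/* bit 2: translation on  */

static unsigned long make_dabr(unsigned long addr, int read, int write)
{
	/* low 3 bits of dabr carry flags, so mask them off the address */
	unsigned long dabr = addr & ~0x7UL;

	if (read)
		dabr |= EX_DABR_DATA_READ;
	if (write)
		dabr |= EX_DABR_DATA_WRITE;
	/* ptrace_set_debugreg() rejects a non-zero word without this bit */
	dabr |= EX_DABR_TRANSLATION;
	return dabr;
}

int main(void)
{
	unsigned long watched = 0x10001234UL;	/* made-up user address */

	printf("dabr = %#lx\n", make_dabr(watched, 0, 1));
	return 0;
}

With the flags set this way, a non-zero word passes the translation-bit and address-range checks shown in ptrace_set_debugreg(), assuming the watched address itself is a valid user-space address.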
crossvul-cpp_data_bad_945_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO GGGGG RRRR IIIII FFFFF Y Y % % MM MM O O G R R I F Y Y % % M M M O O G GGG RRRR I FFF Y % % M M O O G G R R I F Y % % M M OOO GGGG R R IIIII F Y % % % % % % MagickWand Module Methods % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use the mogrify program to resize an image, blur, crop, despeckle, dither, % draw on, flip, join, re-sample, and much more. This tool is similiar to % convert except that the original image file is overwritten (unless you % change the file suffix with the -format option) with any changes you % request. % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/mogrify-private.h" #include "magick/blob-private.h" #include "magick/color-private.h" #include "magick/image-private.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/thread-private.h" #include "magick/string-private.h" #include "magick/timer-private.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_HAVE_UTIME_H) #include <utime.h> #endif /* Define declarations. */ #define UndefinedCompressionQuality 0UL /* Constant declaration. */ static const char MogrifyBackgroundColor[] = "#fff", /* white */ MogrifyBorderColor[] = "#dfdfdf", /* sRGB gray */ MogrifyMatteColor[] = "#bdbdbd"; /* slightly darker gray */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o m m a n d G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickCommandGenesis() applies image processing options to an image as % prescribed by command line options. % % The format of the MagickCommandGenesis method is: % % MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, % MagickCommand command,int argc,char **argv,char **metadata, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o command: Choose from ConvertImageCommand, IdentifyImageCommand, % MogrifyImageCommand, CompositeImageCommand, CompareImageCommand, % ConjureImageCommand, StreamImageCommand, ImportImageCommand, % DisplayImageCommand, or AnimateImageCommand. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. 
% */ WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info, MagickCommand command,int argc,char **argv,char **metadata, ExceptionInfo *exception) { char *option; double duration, serial; MagickBooleanType concurrent, regard_warnings, status; register ssize_t i; size_t iterations, number_threads; ssize_t n; (void) setlocale(LC_ALL,""); (void) setlocale(LC_NUMERIC,"C"); concurrent=MagickFalse; duration=(-1.0); iterations=1; status=MagickTrue; regard_warnings=MagickFalse; for (i=1; i < (ssize_t) (argc-1); i++) { option=argv[i]; if ((strlen(option) == 1) || ((*option != '-') && (*option != '+'))) continue; if (LocaleCompare("bench",option+1) == 0) iterations=StringToUnsignedLong(argv[++i]); if (LocaleCompare("concurrent",option+1) == 0) concurrent=MagickTrue; if (LocaleCompare("debug",option+1) == 0) (void) SetLogEventMask(argv[++i]); if (LocaleCompare("distribute-cache",option+1) == 0) { DistributePixelCacheServer(StringToInteger(argv[++i]),exception); exit(0); } if (LocaleCompare("duration",option+1) == 0) duration=StringToDouble(argv[++i],(char **) NULL); if (LocaleCompare("regard-warnings",option+1) == 0) regard_warnings=MagickTrue; } if (iterations == 1) { status=command(image_info,argc,argv,metadata,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } return(status); } number_threads=GetOpenMPMaximumThreads(); serial=0.0; for (n=1; n <= (ssize_t) number_threads; n++) { double e, parallel, user_time; TimerInfo *timer; (void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n); timer=AcquireTimerInfo(); if (concurrent == MagickFalse) { for (i=0; i < (ssize_t) iterations; i++) { if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,metadata,exception); if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } } } else { SetOpenMPNested(1); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp parallel for shared(status) #endif for (i=0; i < (ssize_t) iterations; i++) { if (status == MagickFalse) continue; if (duration > 0) { if (GetElapsedTime(timer) > duration) continue; (void) ContinueTimer(timer); } status=command(image_info,argc,argv,metadata,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_MagickCommandGenesis) #endif { if (exception->severity != UndefinedException) { if ((exception->severity > ErrorException) || (regard_warnings != MagickFalse)) status=MagickFalse; CatchException(exception); } if ((metadata != (char **) NULL) && (*metadata != (char *) NULL)) { (void) fputs(*metadata,stdout); *metadata=DestroyString(*metadata); } } } } user_time=GetUserTime(timer); parallel=GetElapsedTime(timer); e=1.0; if (n == 1) serial=parallel; else e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/ (double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n); (void) FormatLocaleFile(stderr, " Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n", (double) n,(double) 
iterations,(double) iterations/parallel,e,user_time, (unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel, 60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5)); timer=DestroyTimerInfo(timer); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImage() applies simple single image processing options to a single % image that may be part of a large list, but also handles any 'region' % image handling. % % The image in the list may be modified in three different ways... % % * directly modified (EG: -negate, -gamma, -level, -annotate, -draw), % * replaced by a new image (EG: -spread, -resize, -rotate, -morphology) % * replace by a list of images (only the -separate option!) % % In each case the result is returned into the list, and a pointer to the % modified image (last image added if replaced by a list of images) is % returned. % % ASIDE: The -crop is present but restricted to non-tile single image crops % % This means if all the images are being processed (such as by % MogrifyImages(), next image to be processed will be as per the pointer % (*image)->next. Also the image list may grow as a result of some specific % operations but as images are never merged or deleted, it will never shrink % in length. Typically the list will remain the same length. % % WARNING: As the image pointed to may be replaced, the first image in the % list may also change. GetFirstImageInList() should be used by caller if % they wish return the Image pointer to the first image in list. % % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, % const char **argv,Image **image) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline Image *GetImageCache(const ImageInfo *image_info,const char *path, ExceptionInfo *exception) { char key[MaxTextExtent]; ExceptionInfo *sans_exception; Image *image; ImageInfo *read_info; /* Read an image into a image cache if not already present. Return the image that is in the cache under that filename. 
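For example (hypothetical caller): repeated calls such as GetImageCache(mogrify_info,"clut.png",exception) read the file only once; later calls return the Image already stored in the registry under the key "cache:clut.png".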
*/ (void) FormatLocaleString(key,MaxTextExtent,"cache:%s",path); sans_exception=AcquireExceptionInfo(); image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (image != (Image *) NULL) return(image); read_info=CloneImageInfo(image_info); (void) CopyMagickString(read_info->filename,path,MaxTextExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) SetImageRegistry(ImageRegistryType,key,image,exception); return(image); } static inline MagickBooleanType IsPathWritable(const char *path) { if (IsPathAccessible(path) == MagickFalse) return(MagickFalse); if (access_utf8(path,W_OK) != 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType MonitorProgress(const char *text, const MagickOffsetType offset,const MagickSizeType extent, void *wand_unused(client_data)) { char message[MaxTextExtent], tag[MaxTextExtent]; const char *locale_message; register char *p; wand_unreferenced(client_data); if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent)) return(MagickTrue); if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0)) return(MagickTrue); (void) CopyMagickString(tag,text,MaxTextExtent); p=strrchr(tag,'/'); if (p != (char *) NULL) *p='\0'; (void) FormatLocaleString(message,MaxTextExtent,"Monitor/%s",tag); locale_message=GetLocaleMessage(message); if (locale_message == message) locale_message=tag; if (p == (char *) NULL) (void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r", locale_message,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); else (void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r", locale_message,p+1,(long) offset,(unsigned long) extent,(long) (100L*offset/(extent-1))); if (offset == (MagickOffsetType) (extent-1)) (void) FormatLocaleFile(stderr,"\n"); (void) fflush(stderr); return(MagickTrue); } static Image *SparseColorOption(const Image *image,const ChannelType channel, const SparseColorMethod method,const char *arguments, const MagickBooleanType color_from_image,ExceptionInfo *exception) { ChannelType channels; char token[MaxTextExtent]; const char *p; double *sparse_arguments; Image *sparse_image; MagickBooleanType error; MagickPixelPacket color; register size_t x; size_t number_arguments, number_colors; /* SparseColorOption() parses the complex -sparse-color argument into an array of floating point values then calls SparseColorImage(). The argument is a complex mix of floating-point pixel coordinates and color specifications (or direct floating point numbers). The number of floats needed to represent a color varies depending on the current channel setting. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Limit channels according to the image - and add up the number of color channels.
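For example (purely illustrative): for an RGB image with the default channel setting each control point consumes five floats -- x, y, then the red, green and blue values scaled by QuantumScale -- so an argument string such as "0,0 red 100,100 blue" expands to 0,0,1,0,0 and 100,100,0,0,1.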
*/ channels=channel; if (image->colorspace != CMYKColorspace) channels=(ChannelType) (channels & ~IndexChannel); /* no index channel */ if (image->matte == MagickFalse) channels=(ChannelType) (channels & ~OpacityChannel); /* no alpha channel */ number_colors=0; if ((channels & RedChannel) != 0) number_colors++; if ((channels & GreenChannel) != 0) number_colors++; if ((channels & BlueChannel) != 0) number_colors++; if ((channels & IndexChannel) != 0) number_colors++; if ((channels & OpacityChannel) != 0) number_colors++; /* Read string, to determine number of arguments needed, */ p=arguments; x=0; while( *p != '\0' ) { GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == ',' ) continue; if ( isalpha((int) token[0]) || token[0] == '#' ) { if ( color_from_image ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color arg given, when colors are coming from image"); return( (Image *) NULL); } x += number_colors; /* color argument */ } else { x++; /* floating point argument */ } } error=MagickTrue; if ( color_from_image ) { /* just the control points are being given */ error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse; number_arguments=(x/2)*(2+number_colors); } else { /* control points and color values */ error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse; number_arguments=x; } if ( error ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Invalid number of Arguments"); return( (Image *) NULL); } /* Allocate and fill in the floating point arguments */ sparse_arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*sparse_arguments)); if (sparse_arguments == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError, " MemoryAllocationFailed\n""%s","SparseColorOption"); return( (Image *) NULL); } (void) memset(sparse_arguments,0,number_arguments* sizeof(*sparse_arguments)); p=arguments; x=0; while( *p != '\0' && x < number_arguments ) { /* X coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color found, instead of X-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* Y coordinate */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError, "InvalidArgument", "`%s': %s", "sparse-color", "Color found, instead of Y-coord"); error = MagickTrue; break; } sparse_arguments[x++]=StringToDouble(token,(char **) NULL); /* color values for this control point */ #if 0 if ( (color_from_image ) { /* get color from image */ /* HOW??? 
*/ } else #endif { /* color name or function given in string argument */ token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' ) break; if ( isalpha((int) token[0]) || token[0] == '#' ) { /* Color string given */ (void) QueryMagickColor(token,&color,exception); if ( channels & RedChannel ) sparse_arguments[x++] = QuantumScale*color.red; if ( channels & GreenChannel ) sparse_arguments[x++] = QuantumScale*color.green; if ( channels & BlueChannel ) sparse_arguments[x++] = QuantumScale*color.blue; if ( channels & IndexChannel ) sparse_arguments[x++] = QuantumScale*color.index; if ( channels & OpacityChannel ) sparse_arguments[x++] = QuantumScale*color.opacity; } else { /* Colors given as a set of floating point values - experimental */ /* NB: token contains the first floating point value to use! */ if ( channels & RedChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & GreenChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & BlueChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & IndexChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } if ( channels & OpacityChannel ) { while ( token[0] == ',' ) GetNextToken(p,&p,MaxTextExtent,token); if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' ) break; sparse_arguments[x++]=StringToDouble(token,(char **) NULL); token[0] = ','; /* used this token - get another */ } } } } if ( number_arguments != x && !error ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, " InvalidArgument","`%s': %s","sparse-color","Argument Parsing Error"); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( (Image *) NULL); } if ( error ) return( (Image *) NULL); /* Call the Interpolation function with the parsed arguments */ sparse_image=SparseColorImage(image,channels,method,number_arguments, sparse_arguments,exception); sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments); return( sparse_image ); } WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc, const char **argv,Image **image,ExceptionInfo *exception) { ChannelType channel; const char *format, *option; DrawInfo *draw_info; GeometryInfo geometry_info; Image *region_image; ImageInfo *mogrify_info; MagickStatusType status; MagickPixelPacket fill; MagickStatusType flags; QuantizeInfo *quantize_info; RectangleInfo geometry, region_geometry; register ssize_t i; /* Initialize method variables. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (argc < 0) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL); quantize_info=AcquireQuantizeInfo(mogrify_info); SetGeometryInfo(&geometry_info); GetMagickPixelPacket(*image,&fill); SetMagickPixelPacket(*image,&(*image)->background_color,(IndexPacket *) NULL, &fill); channel=mogrify_info->channel; format=GetImageOption(mogrify_info,"format"); SetGeometry(*image,&region_geometry); region_image=NewImageList(); /* Transmogrify the image. */ for (i=0; i < (ssize_t) argc; i++) { Image *mogrify_image; ssize_t count; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option), 0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); mogrify_image=(Image *) NULL; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { /* Adaptive blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { /* Adaptive resize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=AdaptiveResizeImage(*image,geometry.width, geometry.height,exception); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { /* Adaptive sharpen image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=AdaptiveSharpenImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("affine",option+1) == 0) { /* Affine matrix. */ if (*option == '+') { GetAffineMatrix(&draw_info->affine); break; } (void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception); break; } if (LocaleCompare("alpha",option+1) == 0) { AlphaChannelType alpha_type; (void) SyncImageSettings(mogrify_info,*image); alpha_type=(AlphaChannelType) ParseCommandOption(MagickAlphaOptions, MagickFalse,argv[i+1]); (void) SetImageAlphaChannel(*image,alpha_type); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("annotate",option+1) == 0) { char *text, geometry[MaxTextExtent]; /* Annotate image. 
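The geometry argument packs two rotation angles and an offset; for example (illustrative) "-annotate 45x45+10+20 'text'" rotates the text by 45 degrees and places it at +10+20, and when only one angle is given the code below reuses it for both axes.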
*/ (void) SyncImageSettings(mogrify_info,*image); SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; text=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (text == (char *) NULL) break; (void) CloneString(&draw_info->text,text); text=DestroyString(text); (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f", geometry_info.xi,geometry_info.psi); (void) CloneString(&draw_info->geometry,geometry); draw_info->affine.sx=cos(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.rx=sin(DegreesToRadians( fmod(geometry_info.rho,360.0))); draw_info->affine.ry=(-sin(DegreesToRadians( fmod(geometry_info.sigma,360.0)))); draw_info->affine.sy=cos(DegreesToRadians( fmod(geometry_info.sigma,360.0))); (void) AnnotateImage(*image,draw_info); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("antialias",option+1) == 0) { draw_info->stroke_antialias=(*option == '-') ? MagickTrue : MagickFalse; draw_info->text_antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("auto-gamma",option+1) == 0) { /* Auto Adjust Gamma of image based on its mean */ (void) SyncImageSettings(mogrify_info,*image); (void) AutoGammaImageChannel(*image,channel); break; } if (LocaleCompare("auto-level",option+1) == 0) { /* Perfectly Normalize (max/min stretch) the image */ (void) SyncImageSettings(mogrify_info,*image); (void) AutoLevelImageChannel(*image,channel); break; } if (LocaleCompare("auto-orient",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); mogrify_image=AutoOrientImage(*image,(*image)->orientation, exception); break; } break; } case 'b': { if (LocaleCompare("black-threshold",option+1) == 0) { /* Black threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) BlackThresholdImageChannel(*image,channel,argv[i+1], exception); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("blue-shift",option+1) == 0) { /* Blue shift image. */ (void) SyncImageSettings(mogrify_info,*image); geometry_info.rho=1.5; if (*option == '-') flags=ParseGeometry(argv[i+1],&geometry_info); mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("blur",option+1) == 0) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=BlurImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("border",option+1) == 0) { /* Surround image with a border of solid color. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=BorderImage(*image,&geometry,exception); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase(MogrifyBorderColor, &draw_info->border_color,exception); break; } (void) QueryColorDatabase(argv[i+1],&draw_info->border_color, exception); break; } if (LocaleCompare("box",option+1) == 0) { (void) QueryColorDatabase(argv[i+1],&draw_info->undercolor, exception); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { double brightness, contrast; GeometryInfo geometry_info; MagickStatusType flags; /* Brightness / contrast image. 
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); brightness=geometry_info.rho; contrast=0.0; if ((flags & SigmaValue) != 0) contrast=geometry_info.sigma; (void) BrightnessContrastImageChannel(*image,channel,brightness, contrast); InheritException(exception,&(*image)->exception); break; } break; } case 'c': { if (LocaleCompare("canny",option+1) == 0) { /* Detect edges in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.10; if ((flags & PsiValue) == 0) geometry_info.psi=0.30; if ((flags & PercentValue) != 0) { geometry_info.xi/=100.0; geometry_info.psi/=100.0; } mogrify_image=CannyEdgeImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception); break; } if (LocaleCompare("cdl",option+1) == 0) { char *color_correction_collection; /* Color correct with a color decision list. */ (void) SyncImageSettings(mogrify_info,*image); color_correction_collection=FileToString(argv[i+1],~0UL,exception); if (color_correction_collection == (char *) NULL) break; (void) ColorDecisionListImage(*image,color_correction_collection); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') channel=DefaultChannels; else channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("charcoal",option+1) == 0) { /* Charcoal image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=CharcoalImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("chop",option+1) == 0) { /* Chop the image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ChopImage(*image,&geometry,exception); break; } if (LocaleCompare("clamp",option+1) == 0) { /* Clamp image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ClampImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) SetImageClipMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } (void) ClipImage(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip-mask",option+1) == 0) { CacheView *mask_view; Image *mask_image; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t y; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } /* Set the image mask. FUTURE: This Should Be a SetImageAlphaChannel() call, Or two. 
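The loop that follows copies the mask image's intensity into its opacity channel (when the mask has no alpha of its own) and mirrors that value into the red, green and blue channels, so SetImageClipMask() receives a proper grayscale matte.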
*/ mask_image=GetImageCache(mogrify_info,argv[i+1],exception); if (mask_image == (Image *) NULL) break; if (SetImageStorageClass(mask_image,DirectClass) == MagickFalse) return(MagickFalse); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) mask_image->rows; y++) { q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) mask_image->columns; x++) { if (mask_image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(GetPixelIntensity(mask_image, q))); SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) break; } mask_view=DestroyCacheView(mask_view); mask_image->matte=MagickTrue; (void) SetImageClipMask(*image,mask_image); mask_image=DestroyImage(mask_image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("clip-path",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("colorize",option+1) == 0) { /* Colorize the image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=ColorizeImage(*image,argv[i+1],draw_info->fill, exception); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image); kernel=AcquireKernelInfo(argv[i+1]); if (kernel == (KernelInfo *) NULL) break; mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("colors",option+1) == 0) { /* Reduce the number of colors in the image. */ (void) SyncImageSettings(mogrify_info,*image); quantize_info->number_colors=StringToUnsignedLong(argv[i+1]); if (quantize_info->number_colors == 0) break; if (((*image)->storage_class == DirectClass) || (*image)->colors > quantize_info->number_colors) (void) QuantizeImage(quantize_info,*image); else (void) CompressImageColormap(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("colorspace",option+1) == 0) { ColorspaceType colorspace; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) TransformImageColorspace(*image,sRGBColorspace); InheritException(exception,&(*image)->exception); break; } colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) TransformImageColorspace(*image,colorspace); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("connected-components",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); mogrify_image=ConnectedComponentsImage(*image, StringToInteger(argv[i+1]),exception); break; } if (LocaleCompare("contrast",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ContrastImage(*image,(*option == '-') ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("contrast-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; /* Contrast stretch image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(flags & SigmaValue) != 0 ? 
geometry_info.sigma : black_point; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } white_point=(MagickRealType) (*image)->columns*(*image)->rows- white_point; (void) ContrastStretchImageChannel(*image,channel,black_point, white_point); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("convolve",option+1) == 0) { double gamma; KernelInfo *kernel_info; register ssize_t j; size_t extent; (void) SyncImageSettings(mogrify_info,*image); kernel_info=AcquireKernelInfo(argv[i+1]); if (kernel_info == (KernelInfo *) NULL) break; extent=kernel_info->width*kernel_info->height; gamma=0.0; for (j=0; j < (ssize_t) extent; j++) gamma+=kernel_info->values[j]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (j=0; j < (ssize_t) extent; j++) kernel_info->values[j]*=gamma; mogrify_image=MorphologyImage(*image,CorrelateMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("crop",option+1) == 0) { /* Crop a image to a smaller size */ (void) SyncImageSettings(mogrify_info,*image); #if 0 flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (((geometry.width != 0) || (geometry.height != 0)) && ((flags & XValue) == 0) && ((flags & YValue) == 0)) break; #endif #if 0 mogrify_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception); mogrify_image->next = mogrify_image->previous = (Image *) NULL; (void) TransformImage(&mogrify_image,argv[i+1],(char *) NULL); InheritException(exception,&mogrify_image->exception); #else mogrify_image=CropImageToTiles(*image,argv[i+1],exception); #endif break; } if (LocaleCompare("cycle",option+1) == 0) { /* Cycle an image colormap. */ (void) SyncImageSettings(mogrify_info,*image); (void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1])); InheritException(exception,&(*image)->exception); break; } break; } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { StringInfo *passkey; /* Decipher pixels. */ (void) SyncImageSettings(mogrify_info,*image); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyDecipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. */ (void) CloneString(&draw_info->density,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { (void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH); break; } (void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1])); break; } if (LocaleCompare("deskew",option+1) == 0) { double threshold; /* Straighten the image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') threshold=40.0*QuantumRange/100.0; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=DeskewImage(*image,threshold,exception); break; } if (LocaleCompare("despeckle",option+1) == 0) { /* Reduce the speckles within an image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=DespeckleImage(*image,exception); break; } if (LocaleCompare("display",option+1) == 0) { (void) CloneString(&draw_info->server_name,argv[i+1]); break; } if (LocaleCompare("distort",option+1) == 0) { char *args, token[MaxTextExtent]; const char *p; DistortImageMethod method; double *arguments; register ssize_t x; size_t number_arguments; /* Distort image. 
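For example (illustrative): "-distort Resize 800x600" takes the ResizeDistortion shortcut below, where the geometry is converted into exactly two distort arguments, while a method such as "-distort SRT 45" has its argument string tokenized into an array of doubles.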
*/ (void) SyncImageSettings(mogrify_info,*image); method=(DistortImageMethod) ParseCommandOption(MagickDistortOptions, MagickFalse,argv[i+1]); if (method == ResizeDistortion) { double resize_args[2]; /* Resize distortion. */ (void) ParseRegionGeometry(*image,argv[i+2],&geometry, exception); resize_args[0]=(double) geometry.width; resize_args[1]=(double) geometry.height; mogrify_image=DistortImage(*image,method,(size_t) 2, resize_args,MagickTrue,exception); break; } args=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(arguments,0,number_arguments*sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); mogrify_image=DistortImage(*image,method,number_arguments,arguments, (*option == '+') ? MagickTrue : MagickFalse,exception); arguments=(double *) RelinquishMagickMemory(arguments); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither=MagickFalse; break; } quantize_info->dither=MagickTrue; quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); if (quantize_info->dither_method == NoDitherMethod) quantize_info->dither=MagickFalse; break; } if (LocaleCompare("draw",option+1) == 0) { /* Draw image. */ (void) SyncImageSettings(mogrify_info,*image); (void) CloneString(&draw_info->primitive,argv[i+1]); (void) DrawImage(*image,draw_info); InheritException(exception,&(*image)->exception); break; } break; } case 'e': { if (LocaleCompare("edge",option+1) == 0) { /* Enhance edges in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EdgeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("emboss",option+1) == 0) { /* Gaussian embossen image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=EmbossImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("encipher",option+1) == 0) { StringInfo *passkey; /* Encipher pixels. */ (void) SyncImageSettings(mogrify_info,*image); passkey=FileToStringInfo(argv[i+1],~0UL,exception); if (passkey != (StringInfo *) NULL) { (void) PasskeyEncipherImage(*image,passkey,exception); passkey=DestroyStringInfo(passkey); } break; } if (LocaleCompare("encoding",option+1) == 0) { (void) CloneString(&draw_info->encoding,argv[i+1]); break; } if (LocaleCompare("enhance",option+1) == 0) { /* Enhance image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=EnhanceImage(*image,exception); break; } if (LocaleCompare("equalize",option+1) == 0) { /* Equalize image. 
*/ (void) SyncImageSettings(mogrify_info,*image); (void) EqualizeImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("evaluate",option+1) == 0) { double constant; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*image); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+ 1.0); (void) EvaluateImageChannel(*image,channel,op,constant,exception); break; } if (LocaleCompare("extent",option+1) == 0) { /* Set the image extent. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception); if (geometry.width == 0) geometry.width=(*image)->columns; if (geometry.height == 0) geometry.height=(*image)->rows; mogrify_image=ExtentImage(*image,&geometry,exception); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') { if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); break; } (void) CloneString(&draw_info->family,argv[i+1]); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:features"); break; } (void) SetImageArtifact(*image,"identify:features",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("fill",option+1) == 0) { ExceptionInfo *sans; GetMagickPixelPacket(*image,&fill); if (*option == '+') { (void) QueryMagickColor("none",&fill,exception); (void) QueryColorDatabase("none",&draw_info->fill,exception); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } sans=AcquireExceptionInfo(); (void) QueryMagickColor(argv[i+1],&fill,sans); status=QueryColorDatabase(argv[i+1],&draw_info->fill,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("flip",option+1) == 0) { /* Flip image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=FlipImage(*image,exception); break; } if (LocaleCompare("floodfill",option+1) == 0) { MagickPixelPacket target; /* Floodfill image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) QueryMagickColor(argv[i+2],&target,exception); (void) FloodfillPaintImage(*image,channel,draw_info,&target, geometry.x,geometry.y,*option == '-' ? MagickFalse : MagickTrue); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("flop",option+1) == 0) { /* Flop image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=FlopImage(*image,exception); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); break; } (void) CloneString(&draw_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { format=argv[i+1]; break; } if (LocaleCompare("frame",option+1) == 0) { FrameInfo frame_info; /* Surround image with an ornamental border. 
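For example (illustrative): "-frame 10x10+3+2" requests a frame 10 pixels thick with a 3 pixel outer and a 2 pixel inner bevel; the code below then grows the canvas by twice the frame width and height.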
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); frame_info.width=geometry.width; frame_info.height=geometry.height; frame_info.outer_bevel=geometry.x; frame_info.inner_bevel=geometry.y; frame_info.x=(ssize_t) frame_info.width; frame_info.y=(ssize_t) frame_info.height; frame_info.width=(*image)->columns+2*frame_info.width; frame_info.height=(*image)->rows+2*frame_info.height; mogrify_image=FrameImage(*image,&frame_info,exception); break; } if (LocaleCompare("function",option+1) == 0) { char *arguments, token[MaxTextExtent]; const char *p; double *parameters; MagickFunction function; register ssize_t x; size_t number_parameters; /* Function Modify Image Values */ (void) SyncImageSettings(mogrify_info,*image); function=(MagickFunction) ParseCommandOption(MagickFunctionOptions, MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (arguments == (char *) NULL) break; p=(char *) arguments; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_parameters=(size_t) x; parameters=(double *) AcquireQuantumMemory(number_parameters, sizeof(*parameters)); if (parameters == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*image)->filename); (void) memset(parameters,0,number_parameters* sizeof(*parameters)); p=(char *) arguments; for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); parameters[x]=StringToDouble(token,(char **) NULL); } arguments=DestroyString(arguments); (void) FunctionImageChannel(*image,channel,function, number_parameters,parameters,exception); parameters=(double *) RelinquishMagickMemory(parameters); break; } break; } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { /* Gamma image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') (*image)->gamma=StringToDouble(argv[i+1],(char **) NULL); else { if (strchr(argv[i+1],',') != (char *) NULL) (void) GammaImage(*image,argv[i+1]); else (void) GammaImageChannel(*image,channel, StringToDouble(argv[i+1],(char **) NULL)); InheritException(exception,&(*image)->exception); } break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { /* Gaussian blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=GaussianBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("geometry",option+1) == 0) { /* Record Image offset, Resize last image. 
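For example (illustrative): "-geometry +10+20" only records the offset in the image's geometry string, whereas "-geometry 640x480" (no offset) falls through to ResizeImage().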
*/ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { if ((*image)->geometry != (char *) NULL) (*image)->geometry=DestroyString((*image)->geometry); break; } flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) CloneString(&(*image)->geometry,argv[i+1]); else mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { draw_info->gravity=UndefinedGravity; break; } draw_info->gravity=(GravityType) ParseCommandOption( MagickGravityOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("grayscale",option+1) == 0) { PixelIntensityMethod method; (void) SyncImagesSettings(mogrify_info,*image); method=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,argv[i+1]); (void) GrayscaleImage(*image,method); InheritException(exception,&(*image)->exception); break; } break; } case 'h': { if (LocaleCompare("highlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]); break; } if (LocaleCompare("hough-lines",option+1) == 0) { /* Identify lines in the image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=40; mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } break; } case 'i': { if (LocaleCompare("identify",option+1) == 0) { char *text; (void) SyncImageSettings(mogrify_info,*image); if (format == (char *) NULL) { (void) IdentifyImage(*image,stdout,mogrify_info->verbose); InheritException(exception,&(*image)->exception); break; } text=InterpretImageProperties(mogrify_info,*image,format); InheritException(exception,&(*image)->exception); if (text == (char *) NULL) break; (void) fputs(text,stdout); text=DestroyString(text); break; } if (LocaleCompare("implode",option+1) == 0) { /* Implode image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=ImplodeImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interline_spacing=geometry_info.rho; break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->interword_spacing=geometry_info.rho; break; } if (LocaleCompare("interpolative-resize",option+1) == 0) { /* Resize image using 'point sampled' interpolation */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=InterpolativeResizeImage(*image,geometry.width, geometry.height,(*image)->interpolate,exception); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') (void) ParseGeometry("0",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->kerning=geometry_info.rho; break; } if (LocaleCompare("kuwahara",option+1) == 0) { /* Edge preserving blur. 
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho-0.5; mogrify_image=KuwaharaImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } break; } case 'l': { if (LocaleCompare("lat",option+1) == 0) { /* Local adaptive threshold image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=AdaptiveThresholdImage(*image,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,(ssize_t) geometry_info.xi,exception); break; } if (LocaleCompare("level",option+1) == 0) { MagickRealType black_point, gamma, white_point; MagickStatusType flags; /* Parse levels. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(MagickRealType) QuantumRange; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; gamma=1.0; if ((flags & XiValue) != 0) gamma=geometry_info.xi; if ((flags & PercentValue) != 0) { black_point*=(MagickRealType) (QuantumRange/100.0); white_point*=(MagickRealType) (QuantumRange/100.0); } if ((flags & SigmaValue) == 0) white_point=(MagickRealType) QuantumRange-black_point; if ((*option == '+') || ((flags & AspectValue) != 0)) (void) LevelizeImageChannel(*image,channel,black_point, white_point,gamma); else (void) LevelImageChannel(*image,channel,black_point,white_point, gamma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("level-colors",option+1) == 0) { char token[MaxTextExtent]; const char *p; MagickPixelPacket black_point, white_point; p=(const char *) argv[i+1]; GetNextToken(p,&p,MaxTextExtent,token); /* get black point color */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryMagickColor(token,&black_point,exception); else (void) QueryMagickColor("#000000",&black_point,exception); if (isalpha((int) token[0]) || (token[0] == '#')) GetNextToken(p,&p,MaxTextExtent,token); if (*token == '\0') white_point=black_point; /* set everything to that color */ else { if ((isalpha((int) *token) == 0) && ((*token == '#') == 0)) GetNextToken(p,&p,MaxTextExtent,token); /* Get white point color. */ if ((isalpha((int) *token) != 0) || ((*token == '#') != 0)) (void) QueryMagickColor(token,&white_point,exception); else (void) QueryMagickColor("#ffffff",&white_point,exception); } (void) LevelColorsImageChannel(*image,channel,&black_point, &white_point,*option == '+' ? 
MagickTrue : MagickFalse); break; } if (LocaleCompare("linear-stretch",option+1) == 0) { double black_point, white_point; MagickStatusType flags; (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); black_point=geometry_info.rho; white_point=(MagickRealType) (*image)->columns*(*image)->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) (*image)->columns*(*image)->rows/100.0; white_point*=(double) (*image)->columns*(*image)->rows/100.0; } if ((flags & SigmaValue) == 0) white_point=(MagickRealType) (*image)->columns*(*image)->rows- black_point; (void) LinearStretchImage(*image,black_point,white_point); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("linewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { /* Liquid rescale image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception); if ((flags & XValue) == 0) geometry.x=1; if ((flags & YValue) == 0) geometry.y=0; mogrify_image=LiquidRescaleImage(*image,geometry.width, geometry.height,1.0*geometry.x,1.0*geometry.y,exception); break; } if (LocaleCompare("local-contrast",option+1) == 0) { MagickStatusType flags; (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & RhoValue) == 0) geometry_info.rho=10; if ((flags & SigmaValue) == 0) geometry_info.sigma=12.5; mogrify_image=LocalContrastImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("lowlight-color",option+1) == 0) { (void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("magnify",option+1) == 0) { /* Double image size. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=MagnifyImage(*image,exception); break; } if (LocaleCompare("map",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image); InheritException(exception,&(*image)->exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("mask",option+1) == 0) { Image *mask; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a mask. */ (void) SetImageMask(*image,(Image *) NULL); InheritException(exception,&(*image)->exception); break; } /* Set the image mask. */ mask=GetImageCache(mogrify_info,argv[i+1],exception); if (mask == (Image *) NULL) break; (void) SetImageMask(*image,mask); mask=DestroyImage(mask); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("matte",option+1) == 0) { (void) SetImageAlphaChannel(*image,(*option == '-') ? SetAlphaChannel : DeactivateAlphaChannel ); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("mean-shift",option+1) == 0) { /* Delineate arbitrarily shaped clusters in the image. 
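For example (illustrative): "-mean-shift 7x7+10%" uses a 7x7 window with a color distance of 10% of QuantumRange; when the height or the distance is omitted, the code below defaults them to the width and to 0.10*QuantumRange respectively.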
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; if ((flags & XiValue) == 0) geometry_info.xi=0.10*QuantumRange; if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho, (size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception); break; } if (LocaleCompare("median",option+1) == 0) { /* Median filter image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,MedianStatistic, (size_t) geometry_info.rho,(size_t) geometry_info.rho,exception); break; } if (LocaleCompare("mode",option+1) == 0) { /* Mode image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,ModeStatistic, (size_t) geometry_info.rho,(size_t) geometry_info.rho,exception); break; } if (LocaleCompare("modulate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ModulateImage(*image,argv[i+1]); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("moments",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:moments"); break; } (void) SetImageArtifact(*image,"identify:moments",argv[i+1]); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("monitor",option+1) == 0) { if (*option == '+') { (void) SetImageProgressMonitor(*image, (MagickProgressMonitor) NULL,(void *) NULL); break; } (void) SetImageProgressMonitor(*image,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) SetImageType(*image,BilevelType); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MaxTextExtent]; const char *p; KernelInfo *kernel; MorphologyMethod method; ssize_t iterations; /* Morphological Image Operation */ (void) SyncImageSettings(mogrify_info,*image); p=argv[i+1]; GetNextToken(p,&p,MaxTextExtent,token); method=(MorphologyMethod) ParseCommandOption( MagickMorphologyOptions,MagickFalse,token); iterations=1L; GetNextToken(p,&p,MaxTextExtent,token); if ((*p == ':') || (*p == ',')) GetNextToken(p,&p,MaxTextExtent,token); if ((*p != '\0')) iterations=(ssize_t) StringToLong(p); kernel=AcquireKernelInfo(argv[i+2]); if (kernel == (KernelInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnabletoParseKernel","morphology"); status=MagickFalse; break; } mogrify_image=MorphologyImageChannel(*image,channel,method, iterations,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("motion-blur",option+1) == 0) { /* Motion blur image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=MotionBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception); break; } break; } case 'n': { if (LocaleCompare("negate",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) NegateImageChannel(*image,channel,*option == '+' ? 
MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("noise",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (*option == '-') { flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=StatisticImageChannel(*image,channel, NonpeakStatistic,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); } else { NoiseType noise; noise=(NoiseType) ParseCommandOption(MagickNoiseOptions, MagickFalse,argv[i+1]); mogrify_image=AddNoiseImageChannel(*image,channel,noise, exception); } break; } if (LocaleCompare("normalize",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) NormalizeImageChannel(*image,channel); InheritException(exception,&(*image)->exception); break; } break; } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { MagickPixelPacket target; (void) SyncImageSettings(mogrify_info,*image); (void) QueryMagickColor(argv[i+1],&target,exception); (void) OpaquePaintImageChannel(*image,channel,&target,&fill, *option == '-' ? MagickFalse : MagickTrue); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) OrderedPosterizeImageChannel(*image,channel,argv[i+1], exception); break; } break; } case 'p': { if (LocaleCompare("paint",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=OilPaintImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("pen",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase("none",&draw_info->fill,exception); break; } (void) QueryColorDatabase(argv[i+1],&draw_info->fill,exception); break; } if (LocaleCompare("perceptible",option+1) == 0) { /* Perceptible image. */ (void) SyncImageSettings(mogrify_info,*image); (void) PerceptibleImageChannel(*image,channel,StringToDouble( argv[i+1],(char **) NULL)); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') (void) ParseGeometry("12",&geometry_info); else (void) ParseGeometry(argv[i+1],&geometry_info); draw_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("polaroid",option+1) == 0) { double angle; RandomInfo *random_info; /* Simulate a Polaroid picture. */ (void) SyncImageSettings(mogrify_info,*image); random_info=AcquireRandomInfo(); angle=22.5*(GetPseudoRandomValue(random_info)-0.5); random_info=DestroyRandomInfo(random_info); if (*option == '-') { SetGeometryInfo(&geometry_info); flags=ParseGeometry(argv[i+1],&geometry_info); angle=geometry_info.rho; } mogrify_image=PolaroidImage(*image,draw_info,angle,exception); break; } if (LocaleCompare("posterize",option+1) == 0) { /* Posterize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]), quantize_info->dither); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("preview",option+1) == 0) { PreviewType preview_type; /* Preview image. 
*/ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') preview_type=UndefinedPreview; else preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); mogrify_image=PreviewImage(*image,preview_type,exception); break; } if (LocaleCompare("profile",option+1) == 0) { const char *name; const StringInfo *profile; Image *profile_image; ImageInfo *profile_info; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') { /* Remove a profile from the image. */ (void) ProfileImage(*image,argv[i+1],(const unsigned char *) NULL,0,MagickTrue); InheritException(exception,&(*image)->exception); break; } /* Associate a profile with the image. */ profile_info=CloneImageInfo(mogrify_info); profile=GetImageProfile(*image,"iptc"); if (profile != (StringInfo *) NULL) profile_info->profile=(void *) CloneStringInfo(profile); profile_image=GetImageCache(profile_info,argv[i+1],exception); profile_info=DestroyImageInfo(profile_info); if (profile_image == (Image *) NULL) { StringInfo *profile; profile_info=CloneImageInfo(mogrify_info); (void) CopyMagickString(profile_info->filename,argv[i+1], MaxTextExtent); profile=FileToStringInfo(profile_info->filename,~0UL,exception); if (profile != (StringInfo *) NULL) { (void) SetImageInfo(profile_info,0,exception); (void) ProfileImage(*image,profile_info->magick, GetStringInfoDatum(profile),(size_t) GetStringInfoLength(profile),MagickFalse); profile=DestroyStringInfo(profile); } profile_info=DestroyImageInfo(profile_info); break; } ResetImageProfileIterator(profile_image); name=GetNextImageProfile(profile_image); while (name != (const char *) NULL) { profile=GetImageProfile(profile_image,name); if (profile != (StringInfo *) NULL) (void) ProfileImage(*image,name,GetStringInfoDatum(profile), (size_t) GetStringInfoLength(profile),MagickFalse); name=GetNextImageProfile(profile_image); } profile_image=DestroyImage(profile_image); break; } break; } case 'q': { if (LocaleCompare("quantize",option+1) == 0) { if (*option == '+') { quantize_info->colorspace=UndefinedColorspace; break; } quantize_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); break; } break; } case 'r': { if (LocaleCompare("radial-blur",option+1) == 0 || LocaleCompare("rotational-blur",option+1) == 0) { /* Radial blur image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=RotationalBlurImageChannel(*image,channel, StringToDouble(argv[i+1],(char **) NULL),exception); break; } if (LocaleCompare("raise",option+1) == 0) { /* Surround image with a raise of solid color. */ flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); (void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue : MagickFalse); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("random-threshold",option+1) == 0) { /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) RandomThresholdImageChannel(*image,channel,argv[i+1], exception); break; } if (LocaleCompare("recolor",option+1) == 0) { KernelInfo *kernel; (void) SyncImageSettings(mogrify_info,*image); kernel=AcquireKernelInfo(argv[i+1]); if (kernel == (KernelInfo *) NULL) break; mogrify_image=ColorMatrixImage(*image,kernel,exception); kernel=DestroyKernelInfo(kernel); break; } if (LocaleCompare("region",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); if (region_image != (Image *) NULL) { /* Composite region. */ (void) CompositeImage(region_image,region_image->matte != MagickFalse ? 
CopyCompositeOp : OverCompositeOp,*image, region_geometry.x,region_geometry.y); InheritException(exception,&region_image->exception); *image=DestroyImage(*image); *image=region_image; region_image=(Image *) NULL; } if (*option == '+') break; /* Apply transformations to a selected region of the image. */ (void) ParseGravityGeometry(*image,argv[i+1],&region_geometry, exception); mogrify_image=CropImage(*image,&region_geometry,exception); if (mogrify_image == (Image *) NULL) break; region_image=(*image); *image=mogrify_image; mogrify_image=(Image *) NULL; break; } if (LocaleCompare("render",option+1) == 0) { (void) SyncImageSettings(mogrify_info,*image); draw_info->render=(*option == '+') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("remap",option+1) == 0) { Image *remap_image; /* Transform image colors to match this set of colors. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') break; remap_image=GetImageCache(mogrify_info,argv[i+1],exception); if (remap_image == (Image *) NULL) break; (void) RemapImage(quantize_info,*image,remap_image); InheritException(exception,&(*image)->exception); remap_image=DestroyImage(remap_image); break; } if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') { (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); break; } (void) ResetImagePage(*image,argv[i+1]); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("resample",option+1) == 0) { /* Resample image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ResampleImage(*image,geometry_info.rho, geometry_info.sigma,(*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("resize",option+1) == 0) { /* Resize image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ResizeImage(*image,geometry.width,geometry.height, (*image)->filter,(*image)->blur,exception); break; } if (LocaleCompare("roll",option+1) == 0) { /* Roll image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); if ((flags & PercentValue) != 0) { geometry.x*=(double) (*image)->columns/100.0; geometry.y*=(double) (*image)->rows/100.0; } mogrify_image=RollImage(*image,geometry.x,geometry.y,exception); break; } if (LocaleCompare("rotate",option+1) == 0) { char *geometry; /* Check for conditional image rotation. */ (void) SyncImageSettings(mogrify_info,*image); if (strchr(argv[i+1],'>') != (char *) NULL) if ((*image)->columns <= (*image)->rows) break; if (strchr(argv[i+1],'<') != (char *) NULL) if ((*image)->columns >= (*image)->rows) break; /* Rotate image. */ geometry=ConstantString(argv[i+1]); (void) SubstituteString(&geometry,">",""); (void) SubstituteString(&geometry,"<",""); (void) ParseGeometry(geometry,&geometry_info); geometry=DestroyString(geometry); mogrify_image=RotateImage(*image,geometry_info.rho,exception); break; } break; } case 's': { if (LocaleCompare("sample",option+1) == 0) { /* Sample image with pixel replication. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SampleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("scale",option+1) == 0) { /* Resize image. 
*/ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ScaleImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("selective-blur",option+1) == 0) { /* Selectively blur pixels within a contrast threshold. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0; mogrify_image=SelectiveBlurImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("separate",option+1) == 0) { /* Break channels into separate images. WARNING: This can generate multiple images! */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=SeparateImages(*image,channel,exception); break; } if (LocaleCompare("sepia-tone",option+1) == 0) { double threshold; /* Sepia-tone image. */ (void) SyncImageSettings(mogrify_info,*image); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); mogrify_image=SepiaToneImage(*image,threshold,exception); break; } if (LocaleCompare("segment",option+1) == 0) { /* Segment image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; (void) SegmentImage(*image,(*image)->colorspace, mogrify_info->verbose,geometry_info.rho,geometry_info.sigma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("set",option+1) == 0) { char *value; /* Set image option. */ if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) DeleteImageOption(mogrify_info,argv[i+1]+7); (void) DeleteImageArtifact(*image,argv[i+1]+7); } else (void) DeleteImageProperty(*image,argv[i+1]); break; } value=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (value == (char *) NULL) break; if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value, exception); else if (LocaleNCompare(argv[i+1],"option:",7) == 0) { (void) SetImageOption(image_info,argv[i+1]+7,value); (void) SetImageOption(mogrify_info,argv[i+1]+7,value); (void) SetImageArtifact(*image,argv[i+1]+7,value); } else (void) SetImageProperty(*image,argv[i+1],value); value=DestroyString(value); break; } if (LocaleCompare("shade",option+1) == 0) { /* Shade image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,exception); break; } if (LocaleCompare("shadow",option+1) == 0) { /* Shadow image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=4.0; if ((flags & PsiValue) == 0) geometry_info.psi=4.0; mogrify_image=ShadowImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("sharpen",option+1) == 0) { /* Sharpen image. 
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SharpenImageChannel(*image,channel,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("shave",option+1) == 0) { /* Shave the image edges. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ShaveImage(*image,&geometry,exception); break; } if (LocaleCompare("shear",option+1) == 0) { /* Shear image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; mogrify_image=ShearImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { /* Sigmoidal non-linearity contrast control. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=(double) QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/ 100.0; (void) SigmoidalContrastImageChannel(*image,channel, (*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho, geometry_info.sigma); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("sketch",option+1) == 0) { /* Sketch image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=SketchImage(*image,geometry_info.rho, geometry_info.sigma,geometry_info.xi,exception); break; } if (LocaleCompare("solarize",option+1) == 0) { double threshold; (void) SyncImageSettings(mogrify_info,*image); threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) SolarizeImageChannel(*image,channel,threshold,exception); break; } if (LocaleCompare("sparse-color",option+1) == 0) { SparseColorMethod method; char *arguments; /* Sparse Color Interpolated Gradient */ (void) SyncImageSettings(mogrify_info,*image); method=(SparseColorMethod) ParseCommandOption( MagickSparseColorOptions,MagickFalse,argv[i+1]); arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]); InheritException(exception,&(*image)->exception); if (arguments == (char *) NULL) break; mogrify_image=SparseColorOption(*image,channel,method,arguments, option[0] == '+' ? MagickTrue : MagickFalse,exception); arguments=DestroyString(arguments); break; } if (LocaleCompare("splice",option+1) == 0) { /* Splice a solid color into the image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=SpliceImage(*image,&geometry,exception); break; } if (LocaleCompare("spread",option+1) == 0) { /* Spread an image. 
*/ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SpreadImage(*image,geometry_info.rho,exception); break; } if (LocaleCompare("statistic",option+1) == 0) { StatisticType type; (void) SyncImageSettings(mogrify_info,*image); type=(StatisticType) ParseCommandOption(MagickStatisticOptions, MagickFalse,argv[i+1]); (void) ParseGeometry(argv[i+2],&geometry_info); mogrify_image=StatisticImageChannel(*image,channel,type,(size_t) geometry_info.rho,(size_t) geometry_info.sigma,exception); break; } if (LocaleCompare("stretch",option+1) == 0) { if (*option == '+') { draw_info->stretch=UndefinedStretch; break; } draw_info->stretch=(StretchType) ParseCommandOption( MagickStretchOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("strip",option+1) == 0) { /* Strip image of profiles and comments. */ (void) SyncImageSettings(mogrify_info,*image); (void) StripImage(*image); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("stroke",option+1) == 0) { ExceptionInfo *sans; if (*option == '+') { (void) QueryColorDatabase("none",&draw_info->stroke,exception); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage( draw_info->stroke_pattern); break; } sans=AcquireExceptionInfo(); status=QueryColorDatabase(argv[i+1],&draw_info->stroke,sans); sans=DestroyExceptionInfo(sans); if (status == MagickFalse) draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("strokewidth",option+1) == 0) { draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') { draw_info->style=UndefinedStyle; break; } draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,argv[i+1]); break; } if (LocaleCompare("swirl",option+1) == 0) { /* Swirl image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseGeometry(argv[i+1],&geometry_info); mogrify_image=SwirlImage(*image,geometry_info.rho,exception); break; } break; } case 't': { if (LocaleCompare("threshold",option+1) == 0) { double threshold; /* Threshold image. */ (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') threshold=(double) QuantumRange/2; else threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+ 1.0); (void) BilevelImageChannel(*image,channel,threshold); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("thumbnail",option+1) == 0) { /* Thumbnail image. */ (void) SyncImageSettings(mogrify_info,*image); (void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception); mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height, exception); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') { if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); break; } draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1], exception); break; } if (LocaleCompare("tint",option+1) == 0) { /* Tint the image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TintImage(*image,argv[i+1],draw_info->fill,exception); break; } if (LocaleCompare("transform",option+1) == 0) { /* Affine transform image. 
*/ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=AffineTransformImage(*image,&draw_info->affine, exception); break; } if (LocaleCompare("transparent",option+1) == 0) { MagickPixelPacket target; (void) SyncImageSettings(mogrify_info,*image); (void) QueryMagickColor(argv[i+1],&target,exception); (void) TransparentPaintImage(*image,&target,(Quantum) TransparentOpacity,*option == '-' ? MagickFalse : MagickTrue); InheritException(exception,&(*image)->exception); break; } if (LocaleCompare("transpose",option+1) == 0) { /* Transpose image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TransposeImage(*image,exception); break; } if (LocaleCompare("transverse",option+1) == 0) { /* Transverse image scanlines. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TransverseImage(*image,exception); break; } if (LocaleCompare("treedepth",option+1) == 0) { quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("trim",option+1) == 0) { /* Trim image. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=TrimImage(*image,exception); break; } if (LocaleCompare("type",option+1) == 0) { ImageType type; (void) SyncImageSettings(mogrify_info,*image); if (*option == '+') type=UndefinedType; else type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, argv[i+1]); (*image)->type=UndefinedType; (void) SetImageType(*image,type); InheritException(exception,&(*image)->exception); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { (void) QueryColorDatabase(argv[i+1],&draw_info->undercolor, exception); break; } if (LocaleCompare("unique",option+1) == 0) { if (*option == '+') { (void) DeleteImageArtifact(*image,"identify:unique-colors"); break; } (void) SetImageArtifact(*image,"identify:unique-colors","true"); (void) SetImageArtifact(*image,"verbose","true"); break; } if (LocaleCompare("unique-colors",option+1) == 0) { /* Unique image colors. */ (void) SyncImageSettings(mogrify_info,*image); mogrify_image=UniqueImageColors(*image,exception); break; } if (LocaleCompare("unsharp",option+1) == 0) { /* Unsharp mask image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=1.0; if ((flags & PsiValue) == 0) geometry_info.psi=0.05; mogrify_image=UnsharpMaskImageChannel(*image,channel, geometry_info.rho,geometry_info.sigma,geometry_info.xi, geometry_info.psi,exception); break; } break; } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { (void) SetImageArtifact(*image,option+1, *option == '+' ? "false" : "true"); break; } if (LocaleCompare("vignette",option+1) == 0) { /* Vignette image. 
*/ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; if ((flags & XiValue) == 0) geometry_info.xi=0.1*(*image)->columns; if ((flags & PsiValue) == 0) geometry_info.psi=0.1*(*image)->rows; if ((flags & PercentValue) != 0) { geometry_info.xi*=(double) (*image)->columns/100.0; geometry_info.psi*=(double) (*image)->rows/100.0; } mogrify_image=VignetteImage(*image,geometry_info.rho, geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t) ceil(geometry_info.psi-0.5),exception); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { (void) SetImageVirtualPixelMethod(*image, UndefinedVirtualPixelMethod); break; } (void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1])); break; } break; } case 'w': { if (LocaleCompare("wave",option+1) == 0) { /* Wave image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0; mogrify_image=WaveImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { /* Wavelet denoise image. */ (void) SyncImageSettings(mogrify_info,*image); flags=ParseGeometry(argv[i+1],&geometry_info); if ((flags & PercentValue) != 0) { geometry_info.rho=QuantumRange*geometry_info.rho/100.0; geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0; } if ((flags & SigmaValue) == 0) geometry_info.sigma=0.0; mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho, geometry_info.sigma,exception); break; } if (LocaleCompare("weight",option+1) == 0) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse, argv[i+1]); if (weight == -1) weight=StringToUnsignedLong(argv[i+1]); draw_info->weight=(size_t) weight; break; } if (LocaleCompare("white-threshold",option+1) == 0) { /* White threshold image. */ (void) SyncImageSettings(mogrify_info,*image); (void) WhiteThresholdImageChannel(*image,channel,argv[i+1], exception); InheritException(exception,&(*image)->exception); break; } break; } default: break; } /* Replace current image with any image that was generated. */ if (mogrify_image != (Image *) NULL) ReplaceImageInListReturnLast(image,mogrify_image); i+=count; } if (region_image != (Image *) NULL) { /* Composite transformed region onto image. */ (void) SyncImageSettings(mogrify_info,*image); (void) CompositeImage(region_image,region_image->matte != MagickFalse ? CopyCompositeOp : OverCompositeOp,*image,region_geometry.x, region_geometry.y); InheritException(exception,&region_image->exception); *image=DestroyImage(*image); *image=region_image; region_image = (Image *) NULL; } /* Free resources. */ quantize_info=DestroyQuantizeInfo(quantize_info); draw_info=DestroyDrawInfo(draw_info); mogrify_info=DestroyImageInfo(mogrify_info); status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageCommand() transforms an image or a sequence of images. These % transforms include image scaling, image rotation, color reduction, and % others. The transmogrified image overwrites the original image. 
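%
%  As an illustrative sketch (not part of the original documentation), a caller
%  typically drives this routine with a mogrify-style argument vector, e.g. the
%  equivalent of the command line 'mogrify -resize 50% -quality 80 *.jpg':
%
%    ImageInfo *image_info=AcquireImageInfo();
%    ExceptionInfo *exception=AcquireExceptionInfo();
%    MagickBooleanType status=MogrifyImageCommand(image_info,argc,argv,
%      (char **) NULL,exception);
%    exception=DestroyExceptionInfo(exception);
%    image_info=DestroyImageInfo(image_info);
%
%  Each named file is read, transformed by the requested operators, and written
%  back over the original (unless a -format or -path setting redirects the
%  output to a different filename).
%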
% % The format of the MogrifyImageCommand method is: % % MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc, % char **argv,char **metadata,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o argc: the number of elements in the argument vector. % % o argv: A text array containing the command line arguments. % % o metadata: any metadata is returned here. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType MogrifyUsage(void) { static const char miscellaneous[] = " -debug events display copious debugging information\n" " -distribute-cache port\n" " distributed pixel cache spanning one or more servers\n" " -help print program options\n" " -list type print a list of supported option arguments\n" " -log format format of debugging information\n" " -version print version information", operators[] = " -adaptive-blur geometry\n" " adaptively blur pixels; decrease effect near edges\n" " -adaptive-resize geometry\n" " adaptively resize image using 'mesh' interpolation\n" " -adaptive-sharpen geometry\n" " adaptively sharpen pixels; increase effect near edges\n" " -alpha option on, activate, off, deactivate, set, opaque, copy\n" " transparent, extract, background, or shape\n" " -annotate geometry text\n" " annotate the image with text\n" " -auto-gamma automagically adjust gamma level of image\n" " -auto-level automagically adjust color levels of image\n" " -auto-orient automagically orient (rotate) image\n" " -bench iterations measure performance\n" " -black-threshold value\n" " force all pixels below the threshold into black\n" " -blue-shift simulate a scene at nighttime in the moonlight\n" " -blur geometry reduce image noise and reduce detail levels\n" " -border geometry surround image with a border of color\n" " -bordercolor color border color\n" " -brightness-contrast geometry\n" " improve brightness / contrast of the image\n" " -canny geometry detect edges in the image\n" " -cdl filename color correct with a color decision list\n" " -charcoal radius simulate a charcoal drawing\n" " -chop geometry remove pixels from the image interior\n" " -clamp keep pixel values in range (0-QuantumRange)\n" " -clip clip along the first path from the 8BIM profile\n" " -clip-mask filename associate a clip mask with the image\n" " -clip-path id clip along a named path from the 8BIM profile\n" " -colorize value colorize the image with the fill color\n" " -color-matrix matrix apply color correction to the image\n" " -connected-components connectivity\n" " connected-components uniquely labeled\n" " -contrast enhance or reduce the image contrast\n" " -contrast-stretch geometry\n" " improve contrast by `stretching' the intensity range\n" " -convolve coefficients\n" " apply a convolution kernel to the image\n" " -cycle amount cycle the image colormap\n" " -decipher filename convert cipher pixels to plain pixels\n" " -deskew threshold straighten an image\n" " -despeckle reduce the speckles within an image\n" " -distort method args\n" " distort images according to given method and args\n" " -draw string annotate the image with a graphic primitive\n" " -edge radius apply a filter to detect edges in the image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -emboss radius emboss an image\n" " -enhance apply a digital filter to enhance a noisy image\n" " -equalize perform histogram equalization to an image\n" " -evaluate operator value\n" " evaluate an arithmetic, relational, or logical
expression\n" " -extent geometry set the image size\n" " -extract geometry extract area from image\n" " -hough-lines geometry\n" " identify lines in the image\n" " -features distance analyze image features (e.g. contrast, correlation)\n" " -fft implements the discrete Fourier transform (DFT)\n" " -flip flip image vertically\n" " -floodfill geometry color\n" " floodfill the image with color\n" " -flop flop image horizontally\n" " -frame geometry surround image with an ornamental border\n" " -function name parameters\n" " apply function over image values\n" " -gamma value level of gamma correction\n" " -gaussian-blur geometry\n" " reduce image noise and reduce detail levels\n" " -geometry geometry preferred size or location of the image\n" " -grayscale method convert image to grayscale\n" " -help print program options\n" " -identify identify the format and characteristics of the image\n" " -ift implements the inverse discrete Fourier transform (DFT)\n" " -implode amount implode image pixels about the center\n" " -kuwahara geometry edge preserving noise reduction filter\n" " -lat geometry local adaptive thresholding\n" " -layers method optimize, merge, or compare image layers\n" " -level value adjust the level of image contrast\n" " -level-colors color,color\n" " level image with the given colors\n" " -linear-stretch geometry\n" " improve contrast by `stretching with saturation'\n" " -liquid-rescale geometry\n" " rescale image with seam-carving\n" " -local-contrast geometry\n" " enhance local contrast\n" " -magnify double the size of the image with pixel art scaling\n" " -mean-shift geometry delineate arbitrarily shaped clusters in the image\n" " -median geometry apply a median filter to the image\n" " -mode geometry make each pixel the 'predominant color' of the\n" " neighborhood\n" " -modulate value vary the brightness, saturation, and hue\n" " -monochrome transform image to black and white\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -motion-blur geometry\n" " simulate motion blur\n" " -negate replace every pixel with its complementary color \n" " -noise geometry add or reduce noise in an image\n" " -normalize transform image to span the full range of colors\n" " -opaque color change this color to the fill color\n" " -ordered-dither NxN\n" " add a noise pattern to the image with specific\n" " amplitudes\n" " -paint radius simulate an oil painting\n" " -perceptible epsilon\n" " pixel value less than |epsilon| become epsilon or\n" " -epsilon\n" " -polaroid angle simulate a Polaroid picture\n" " -posterize levels reduce the image to a limited number of color levels\n" " -profile filename add, delete, or apply an image profile\n" " -quantize colorspace reduce colors in this colorspace\n" " -radial-blur angle radial blur the image\n" " -raise value lighten/darken image edges to create a 3-D effect\n" " -random-threshold low,high\n" " random threshold the image\n" " -region geometry apply options to a portion of the image\n" " -render render vector graphics\n" " -resample geometry change the resolution of an image\n" " -resize geometry resize the image\n" " -roll geometry roll an image vertically or horizontally\n" " -rotate degrees apply Paeth rotation to the image\n" " -sample geometry scale image with pixel sampling\n" " -scale geometry scale the image\n" " -segment values segment an image\n" " -selective-blur geometry\n" " selectively blur pixels within a contrast threshold\n" " -sepia-tone threshold\n" " simulate a sepia-toned photo\n" " -set property 
value set an image property\n" " -shade degrees shade the image using a distant light source\n" " -shadow geometry simulate an image shadow\n" " -sharpen geometry sharpen the image\n" " -shave geometry shave pixels from the image edges\n" " -shear geometry slide one edge of the image along the X or Y axis\n" " -sigmoidal-contrast geometry\n" " increase the contrast without saturating highlights or\n" " shadows\n" " -sketch geometry simulate a pencil sketch\n" " -solarize threshold negate all pixels above the threshold level\n" " -sparse-color method args\n" " fill in an image based on a few color points\n" " -splice geometry splice the background color into the image\n" " -spread radius displace image pixels by a random amount\n" " -statistic type radius\n" " replace each pixel with corresponding statistic from the neighborhood\n" " -strip strip image of all profiles and comments\n" " -swirl degrees swirl image pixels about the center\n" " -threshold value threshold the image\n" " -thumbnail geometry create a thumbnail of the image\n" " -tile filename tile image when filling a graphic primitive\n" " -tint value tint the image with the fill color\n" " -transform affine transform image\n" " -transparent color make this color transparent within the image\n" " -transpose flip image vertically and rotate 90 degrees\n" " -transverse flop image horizontally and rotate 270 degrees\n" " -trim trim image edges\n" " -type type image type\n" " -unique-colors discard all but one of any pixel color\n" " -unsharp geometry sharpen the image\n" " -vignette geometry soften the edges of the image in vignette style\n" " -wave geometry alter an image along a sine wave\n" " -wavelet-denoise threshold\n" " removes noise from the image using a wavelet transform\n" " -white-threshold value\n" " force all pixels above the threshold into white", sequence_operators[] = " -affinity filename transform image colors to match this set of colors\n" " -append append an image sequence\n" " -clut apply a color lookup table to the image\n" " -coalesce merge a sequence of images\n" " -combine combine a sequence of images\n" " -compare mathematically and visually annotate the difference between an image and its reconstruction\n" " -complex operator perform complex mathematics on an image sequence\n" " -composite composite image\n" " -copy geometry offset\n" " copy pixels from one area of an image to another\n" " -crop geometry cut out a rectangular region of the image\n" " -deconstruct break down an image sequence into constituent parts\n" " -evaluate-sequence operator\n" " evaluate an arithmetic, relational, or logical expression\n" " -flatten flatten a sequence of images\n" " -fx expression apply mathematical expression to an image channel(s)\n" " -hald-clut apply a Hald color lookup table to the image\n" " -layers method optimize, merge, or compare image layers\n" " -morph value morph an image sequence\n" " -mosaic create a mosaic from an image sequence\n" " -poly terms build a polynomial from the image sequence and the corresponding\n" " terms (coefficients and degree pairs).\n" " -print string interpret string and print to console\n" " -process arguments process the image with a custom image filter\n" " -separate separate an image channel into a grayscale image\n" " -smush geometry smush an image sequence together\n" " -write filename write images to this file", settings[] = " -adjoin join images into a single multi-image file\n" " -affine matrix affine transform matrix\n" " -alpha option activate, deactivate, reset, or set
the alpha channel\n" " -antialias remove pixel-aliasing\n" " -authenticate password\n" " decipher image with this password\n" " -attenuate value lessen (or intensify) when adding noise to an image\n" " -background color background color\n" " -bias value add bias when convolving an image\n" " -black-point-compensation\n" " use black point compensation\n" " -blue-primary point chromaticity blue primary point\n" " -bordercolor color border color\n" " -caption string assign a caption to an image\n" " -cdl filename color correct with a color decision list\n" " -channel type apply option to select image channels\n" " -colors value preferred number of colors in the image\n" " -colorspace type alternate image colorspace\n" " -comment string annotate image with comment\n" " -compose operator set image composite operator\n" " -compress type type of pixel compression when writing the image\n" " -decipher filename convert cipher pixels to plain pixels\n" " -define format:option\n" " define one or more image format options\n" " -delay value display the next image after pausing\n" " -density geometry horizontal and vertical density of the image\n" " -depth value image depth\n" " -direction type render text right-to-left or left-to-right\n" " -display server get image or font from this X server\n" " -dispose method layer disposal method\n" " -dither method apply error diffusion to image\n" " -encipher filename convert plain pixels to cipher pixels\n" " -encoding type text encoding type\n" " -endian type endianness (MSB or LSB) of the image\n" " -family name render text with this font family\n" " -features distance analyze image features (e.g. contrast, correlation)\n" " -fill color color to use when filling a graphic primitive\n" " -filter type use this filter when resizing an image\n" " -flatten flatten a sequence of images\n" " -font name render text with this font\n" " -format \"string\" output formatted image characteristics\n" " -function name apply a function to the image\n" " -fuzz distance colors within this distance are considered equal\n" " -gravity type horizontal and vertical text placement\n" " -green-primary point chromaticity green primary point\n" " -intensity method method to generate intensity value from pixel\n" " -intent type type of rendering intent when managing the image color\n" " -interlace type type of image interlacing scheme\n" " -interline-spacing value\n" " set the space between two text lines\n" " -interpolate method pixel color interpolation method\n" " -interword-spacing value\n" " set the space between two words\n" " -kerning value set the space between two letters\n" " -label string assign a label to an image\n" " -limit type value pixel cache resource limit\n" " -loop iterations add Netscape loop extension to your GIF animation\n" " -mask filename associate a mask with the image\n" " -matte store matte channel if the image has one\n" " -mattecolor color frame color\n" " -monitor monitor progress\n" " -morphology method kernel\n" " apply a morphology method to the image\n" " -orient type image orientation\n" " -page geometry size and location of an image canvas (setting)\n" " -path path write images to this path on disk\n" " -ping efficiently determine image attributes\n" " -pointsize value font point size\n" " -precision value maximum number of significant digits to print\n" " -preview type image preview type\n" " -quality value JPEG/MIFF/PNG compression level\n" " -quiet suppress all warning messages\n" " -red-primary point chromaticity red primary point\n" " 
-regard-warnings pay attention to warning messages\n" " -remap filename transform image colors to match this set of colors\n" " -repage geometry size and location of an image canvas\n" " -respect-parentheses settings remain in effect until parenthesis boundary\n" " -sampling-factor geometry\n" " horizontal and vertical sampling factor\n" " -scene value image scene number\n" " -seed value seed a new sequence of pseudo-random numbers\n" " -size geometry width and height of image\n" " -stretch type render text with this font stretch\n" " -stroke color graphic primitive stroke color\n" " -strokewidth value graphic primitive stroke width\n" " -style type render text with this font style\n" " -synchronize synchronize image to storage device\n" " -taint declare the image as modified\n" " -texture filename name of texture to tile onto the image background\n" " -tile-offset geometry\n" " tile offset\n" " -treedepth value color tree depth\n" " -transparent-color color\n" " transparent color\n" " -undercolor color annotation bounding box color\n" " -units type the units of image resolution\n" " -verbose print detailed information about the image\n" " -view FlashPix viewing transforms\n" " -virtual-pixel method\n" " virtual pixel access method\n" " -weight type render text with this font weight\n" " -white-point point chromaticity white point", stack_operators[] = " -delete indexes delete the image from the image sequence\n" " -duplicate count,indexes\n" " duplicate an image one or more times\n" " -insert index insert last image into the image sequence\n" " -reverse reverse image sequence\n" " -swap indexes swap two images in the image sequence"; ListMagickVersion(stdout); (void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n", GetClientName()); (void) printf("\nImage Settings:\n"); (void) puts(settings); (void) printf("\nImage Operators:\n"); (void) puts(operators); (void) printf("\nImage Sequence Operators:\n"); (void) puts(sequence_operators); (void) printf("\nImage Stack Operators:\n"); (void) puts(stack_operators); (void) printf("\nMiscellaneous Options:\n"); (void) puts(miscellaneous); (void) printf( "\nBy default, the image format of `file' is determined by its magic\n"); (void) printf( "number. To specify a particular image format, precede the filename\n"); (void) printf( "with an image format name and a colon (i.e. ps:image) or specify the\n"); (void) printf( "image type as the filename suffix (i.e. image.ps). 
Specify 'file' as\n"); (void) printf("'-' for standard input or output.\n"); return(MagickFalse); } WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info, int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception) { #define DestroyMogrify() \ { \ if (format != (char *) NULL) \ format=DestroyString(format); \ if (path != (char *) NULL) \ path=DestroyString(path); \ DestroyImageStack(); \ for (i=0; i < (ssize_t) argc; i++) \ argv[i]=DestroyString(argv[i]); \ argv=(char **) RelinquishMagickMemory(argv); \ } #define ThrowMogrifyException(asperity,tag,option) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \ option); \ DestroyMogrify(); \ return(MagickFalse); \ } #define ThrowMogrifyInvalidArgumentException(option,argument) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),OptionError, \ "InvalidArgument","`%s': %s",argument,option); \ DestroyMogrify(); \ return(MagickFalse); \ } char *format, *option, *path; Image *image; ImageStack image_stack[MaxImageStackDepth+1]; MagickBooleanType global_colormap; MagickBooleanType fire, pend, respect_parenthesis; MagickStatusType status; register ssize_t i; ssize_t j, k; wand_unreferenced(metadata); /* Set defaults. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(exception != (ExceptionInfo *) NULL); if (argc == 2) { option=argv[1]; if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); return(MagickTrue); } } if (argc < 2) return(MogrifyUsage()); format=(char *) NULL; path=(char *) NULL; global_colormap=MagickFalse; k=0; j=1; NewImageStack(); option=(char *) NULL; pend=MagickFalse; respect_parenthesis=MagickFalse; status=MagickTrue; /* Parse command line. */ ReadCommandlLine(argc,&argv); status=ExpandFilenames(&argc,&argv); if (status == MagickFalse) ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed", GetExceptionMessage(errno)); for (i=1; i < (ssize_t) argc; i++) { option=argv[i]; if (LocaleCompare(option,"(") == 0) { FireImageStack(MagickFalse,MagickTrue,pend); if (k == MaxImageStackDepth) ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply", option); PushImageStack(); continue; } if (LocaleCompare(option,")") == 0) { FireImageStack(MagickFalse,MagickTrue,MagickTrue); if (k == 0) ThrowMogrifyException(OptionError,"UnableToParseExpression",option); PopImageStack(); continue; } if (IsCommandOption(option) == MagickFalse) { char backup_filename[MaxTextExtent], *filename; Image *images; struct stat properties; /* Option is a file name: begin by reading image from specified file. 
*/ FireImageStack(MagickFalse,MagickFalse,pend); filename=argv[i]; if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1))) filename=argv[++i]; (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); images=ReadImages(image_info,exception); status&=(images != (Image *) NULL) && (exception->severity < ErrorException); if (images == (Image *) NULL) continue; properties=(*GetBlobProperties(images)); if (format != (char *) NULL) (void) CopyMagickString(images->filename,images->magick_filename, MaxTextExtent); if (path != (char *) NULL) { GetPathComponent(option,TailPath,filename); (void) FormatLocaleString(images->filename,MaxTextExtent,"%s%c%s", path,*DirectorySeparator,filename); } if (format != (char *) NULL) AppendImageFormat(format,images->filename); AppendImageStack(images); FinalizeImageSettings(image_info,image,MagickFalse); if (global_colormap != MagickFalse) { QuantizeInfo *quantize_info; quantize_info=AcquireQuantizeInfo(image_info); (void) RemapImages(quantize_info,images,(Image *) NULL); quantize_info=DestroyQuantizeInfo(quantize_info); } *backup_filename='\0'; if ((LocaleCompare(image->filename,"-") != 0) && (IsPathWritable(image->filename) != MagickFalse)) { register ssize_t i; /* Rename image file as backup. */ (void) CopyMagickString(backup_filename,image->filename, MaxTextExtent); for (i=0; i < 6; i++) { (void) ConcatenateMagickString(backup_filename,"~",MaxTextExtent); if (IsPathAccessible(backup_filename) == MagickFalse) break; } if ((IsPathAccessible(backup_filename) != MagickFalse) || (rename_utf8(image->filename,backup_filename) != 0)) *backup_filename='\0'; } /* Write transmogrified image to disk. */ image_info->synchronize=MagickTrue; status&=WriteImages(image_info,image,image->filename,exception); if (status != MagickFalse) { #if defined(MAGICKCORE_HAVE_UTIME) { MagickBooleanType preserve_timestamp; preserve_timestamp=IsStringTrue(GetImageOption(image_info, "preserve-timestamp")); if (preserve_timestamp != MagickFalse) { struct utimbuf timestamp; timestamp.actime=properties.st_atime; timestamp.modtime=properties.st_mtime; (void) utime(image->filename,&timestamp); } } #endif if (*backup_filename != '\0') (void) remove_utf8(backup_filename); } RemoveAllImageStack(); continue; } pend=image != (Image *) NULL ? 
MagickTrue : MagickFalse; switch (*(option+1)) { case 'a': { if (LocaleCompare("adaptive-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-resize",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("adaptive-sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("affine",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("alpha",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickAlphaOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedAlphaChannelType", argv[i]); break; } if (LocaleCompare("annotate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); i++; break; } if (LocaleCompare("antialias",option+1) == 0) break; if (LocaleCompare("append",option+1) == 0) break; if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("auto-gamma",option+1) == 0) break; if (LocaleCompare("auto-level",option+1) == 0) break; if (LocaleCompare("auto-orient",option+1) == 0) break; if (LocaleCompare("average",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) break; if (LocaleCompare("black-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blue-shift",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("border",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("brightness-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'c': { if (LocaleCompare("cache",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("canny",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("channel",option+1) == 0) { ssize_t channel; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); channel=ParseChannelOption(argv[i]); if (channel < 0) ThrowMogrifyException(OptionError,"UnrecognizedChannelType", argv[i]); break; } if (LocaleCompare("cdl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("charcoal",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("chop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("clamp",option+1) == 0) break; if (LocaleCompare("clip",option+1) == 0) break; if (LocaleCompare("clip-mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("clut",option+1) == 0) break; if (LocaleCompare("coalesce",option+1) == 0) break; if (LocaleCompare("colorize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("color-matrix",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("colors",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("colorspace",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("combine",option+1) == 0) { if (*option == '-') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("compare",option+1) == 0) break; if (LocaleCompare("complex",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickComplexOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedComplexOperator", argv[i]); break; } if (LocaleCompare("composite",option+1) == 0) break; if (LocaleCompare("compress",option+1) == 0) { ssize_t compress; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); compress=ParseCommandOption(MagickCompressOptions,MagickFalse, argv[i]); if (compress < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageCompression", argv[i]); break; } if (LocaleCompare("concurrent",option+1) == 0) break; if (LocaleCompare("connected-components",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("contrast",option+1) == 0) break; if (LocaleCompare("contrast-stretch",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("convolve",option+1) == 0) { KernelInfo *kernel_info; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); 
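/* The -convolve argument parsed into a valid kernel; this pass only validates command-line arguments, so the kernel is released immediately. */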
kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("copy",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("crop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("cycle",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'd': { if (LocaleCompare("decipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("deconstruct",option+1) == 0) break; if (LocaleCompare("debug",option+1) == 0) { ssize_t event; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]); if (event < 0) ThrowMogrifyException(OptionError,"UnrecognizedEventType", argv[i]); (void) SetLogEventMask(argv[i]); break; } if (LocaleCompare("define",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { const char *define; define=GetImageOption(image_info,argv[i]); if (define == (const char *) NULL) ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]); break; } break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("density",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("deskew",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("despeckle",option+1) == 0) break; if (LocaleCompare("dft",option+1) == 0) break; if (LocaleCompare("direction",option+1) == 0) { ssize_t direction; if (*option == 
'+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, argv[i]); if (direction < 0) ThrowMogrifyException(OptionError,"UnrecognizedDirectionType", argv[i]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dispose",option+1) == 0) { ssize_t dispose; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, argv[i]); if (dispose < 0) ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod", argv[i]); break; } if (LocaleCompare("distort",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("dither",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod", argv[i]); break; } if (LocaleCompare("draw",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("duplicate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("duration",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'e': { if (LocaleCompare("edge",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("emboss",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("encipher",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("endian",option+1) == 0) { ssize_t endian; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]); if (endian < 0) ThrowMogrifyException(OptionError,"UnrecognizedEndianType", argv[i]); break; } if 
(LocaleCompare("enhance",option+1) == 0) break; if (LocaleCompare("equalize",option+1) == 0) break; if (LocaleCompare("evaluate",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("evaluate-sequence",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator", argv[i]); break; } if (LocaleCompare("extent",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("extract",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("features",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("filter",option+1) == 0) { ssize_t filter; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]); if (filter < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageFilter", argv[i]); break; } if (LocaleCompare("flatten",option+1) == 0) break; if (LocaleCompare("flip",option+1) == 0) break; if (LocaleCompare("flop",option+1) == 0) break; if (LocaleCompare("floodfill",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("format",option+1) == 0) { (void) CopyMagickString(argv[i]+1,"sans",MaxTextExtent); (void) CloneString(&format,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&format,argv[i]); (void) 
CopyMagickString(image_info->filename,format,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,":", MaxTextExtent); (void) SetImageInfo(image_info,0,exception); if (*image_info->magick == '\0') ThrowMogrifyException(OptionError,"UnrecognizedImageFormat", format); break; } if (LocaleCompare("frame",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("function",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("fx",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'g': { if (LocaleCompare("gamma",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("gaussian-blur",option+1) == 0) || (LocaleCompare("gaussian",option+1) == 0)) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("geometry",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("gravity",option+1) == 0) { ssize_t gravity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse, argv[i]); if (gravity < 0) ThrowMogrifyException(OptionError,"UnrecognizedGravityType", argv[i]); break; } if (LocaleCompare("grayscale",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod", argv[i]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) break; if (LocaleCompare("hough-lines",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) 
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if ((LocaleCompare("help",option+1) == 0) || (LocaleCompare("-help",option+1) == 0)) return(MogrifyUsage()); ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'i': { if (LocaleCompare("identify",option+1) == 0) break; if (LocaleCompare("idft",option+1) == 0) break; if (LocaleCompare("implode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("intensity",option+1) == 0) { ssize_t intensity; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,argv[i]); if (intensity < 0) ThrowMogrifyException(OptionError, "UnrecognizedPixelIntensityMethod",argv[i]); break; } if (LocaleCompare("intent",option+1) == 0) { ssize_t intent; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]); if (intent < 0) ThrowMogrifyException(OptionError,"UnrecognizedIntentType", argv[i]); break; } if (LocaleCompare("interlace",option+1) == 0) { ssize_t interlace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse, argv[i]); if (interlace < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType", argv[i]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("interpolate",option+1) == 0) { ssize_t interpolate; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse, argv[i]); if (interpolate < 0) ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod", argv[i]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("kuwahara",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'l': { if (LocaleCompare("label",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } 
if (LocaleCompare("lat",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); } if (LocaleCompare("layers",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod", argv[i]); break; } if (LocaleCompare("level",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("level-colors",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("linewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("limit",option+1) == 0) { char *p; double value; ssize_t resource; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); resource=ParseCommandOption(MagickResourceOptions,MagickFalse, argv[i]); if (resource < 0) ThrowMogrifyException(OptionError,"UnrecognizedResourceType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); value=StringToDouble(argv[i],&p); (void) value; if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0)) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("liquid-rescale",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]); if (list < 0) ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]); status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **) argv+j,exception); return(status == 0 ? MagickFalse : MagickTrue); } if (LocaleCompare("local-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; i++; if ((i == (ssize_t) argc) || (strchr(argv[i],'%') == (char *) NULL)) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'm': { if (LocaleCompare("magnify",option+1) == 0) break; if (LocaleCompare("map",option+1) == 0) { global_colormap=(*option == '+') ? 
MagickTrue : MagickFalse; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("mask",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("matte",option+1) == 0) break; if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("metric",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedMetricType", argv[i]); break; } if (LocaleCompare("maximum",option+1) == 0) break; if (LocaleCompare("mean-shift",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("median",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("minimum",option+1) == 0) break; if (LocaleCompare("modulate",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("mode",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("monitor",option+1) == 0) break; if (LocaleCompare("monochrome",option+1) == 0) break; if (LocaleCompare("morph",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("morphology",option+1) == 0) { char token[MaxTextExtent]; KernelInfo *kernel_info; ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); GetNextToken(argv[i],(const char **) NULL,MaxTextExtent,token); op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod", token); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); kernel_info=AcquireKernelInfo(argv[i]); if (kernel_info == (KernelInfo *) NULL) ThrowMogrifyInvalidArgumentException(option,argv[i]); kernel_info=DestroyKernelInfo(kernel_info); break; } if (LocaleCompare("mosaic",option+1) == 0) break; if (LocaleCompare("motion-blur",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'n': { if 
(LocaleCompare("negate",option+1) == 0) break; if (LocaleCompare("noise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') { ssize_t noise; noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,argv[i]); if (noise < 0) ThrowMogrifyException(OptionError,"UnrecognizedNoiseType", argv[i]); break; } if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("noop",option+1) == 0) break; if (LocaleCompare("normalize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'o': { if (LocaleCompare("opaque",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("ordered-dither",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("orient",option+1) == 0) { ssize_t orientation; orientation=UndefinedOrientation; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse, argv[i]); if (orientation < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'p': { if (LocaleCompare("page",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("paint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("path",option+1) == 0) { (void) CloneString(&path,(char *) NULL); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); (void) CloneString(&path,argv[i]); break; } if (LocaleCompare("perceptible",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("polaroid",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("poly",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("posterize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("precision",option+1) == 0) { if (*option == '+') 
break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("print",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("process",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("profile",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'q': { if (LocaleCompare("quality",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("quantize",option+1) == 0) { ssize_t colorspace; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse, argv[i]); if (colorspace < 0) ThrowMogrifyException(OptionError,"UnrecognizedColorspace", argv[i]); break; } if (LocaleCompare("quiet",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'r': { if (LocaleCompare("radial-blur",option+1) == 0 || LocaleCompare("rotational-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("raise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("random-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("recolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("regard-warnings",option+1) == 0) break; if (LocaleCompare("region",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("remap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("render",option+1) == 0) break; if (LocaleCompare("repage",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("resize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleNCompare("respect-parentheses",option+1,17) == 0) { respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("reverse",option+1) == 0) break; if (LocaleCompare("roll",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("rotate",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 's': { if (LocaleCompare("sample",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sampling-factor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scale",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("scene",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("seed",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("segment",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("selective-blur",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("separate",option+1) == 0) break; if (LocaleCompare("sepia-tone",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if 
(IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("set",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("shade",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shadow",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sharpen",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shave",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("shear",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sigmoidal-contrast",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sketch",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("smush",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); i++; break; } if (LocaleCompare("solarize",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("sparse-color",option+1) == 0) { ssize_t op; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("splice",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("spread",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("statistic",option+1) == 0) { ssize_t op; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]); if (op < 0) ThrowMogrifyException(OptionError,"UnrecognizedStatisticType", argv[i]); i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("stretch",option+1) == 0) { ssize_t stretch; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,argv[i]); if (stretch < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("strip",option+1) == 0) break; if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("style",option+1) == 0) { ssize_t style; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]); if (style < 0) ThrowMogrifyException(OptionError,"UnrecognizedStyleType", argv[i]); break; } if (LocaleCompare("swap",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("swirl",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("synchronize",option+1) == 0) break; ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 't': { if (LocaleCompare("taint",option+1) == 0) break; if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("tint",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) 
ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transform",option+1) == 0) break; if (LocaleCompare("transpose",option+1) == 0) break; if (LocaleCompare("transverse",option+1) == 0) break; if (LocaleCompare("threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("thumbnail",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("transparent",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("treedepth",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("trim",option+1) == 0) break; if (LocaleCompare("type",option+1) == 0) { ssize_t type; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]); if (type < 0) ThrowMogrifyException(OptionError,"UnrecognizedImageType", argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("unique-colors",option+1) == 0) break; if (LocaleCompare("units",option+1) == 0) { ssize_t units; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); units=ParseCommandOption(MagickResolutionOptions,MagickFalse, argv[i]); if (units < 0) ThrowMogrifyException(OptionError,"UnrecognizedUnitsType", argv[i]); break; } if (LocaleCompare("unsharp",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'v': { if (LocaleCompare("verbose",option+1) == 0) { image_info->verbose=(*option == '-') ? 
MagickTrue : MagickFalse; break; } if ((LocaleCompare("version",option+1) == 0) || (LocaleCompare("-version",option+1) == 0)) { ListMagickVersion(stdout); break; } if (LocaleCompare("view",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("vignette",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { ssize_t method; if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i]); if (method < 0) ThrowMogrifyException(OptionError, "UnrecognizedVirtualPixelMethod",argv[i]); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case 'w': { if (LocaleCompare("wave",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("wavelet-denoise",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("white-threshold",option+1) == 0) { if (*option == '+') break; i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); if (IsGeometry(argv[i]) == MagickFalse) ThrowMogrifyInvalidArgumentException(option,argv[i]); break; } if (LocaleCompare("write",option+1) == 0) { i++; if (i == (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingArgument",option); break; } ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } case '?': break; default: ThrowMogrifyException(OptionError,"UnrecognizedOption",option) } fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) & FireOptionFlag) == 0 ? MagickFalse : MagickTrue; if (fire != MagickFalse) FireImageStack(MagickFalse,MagickTrue,MagickTrue); } if (k != 0) ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]); if (i != (ssize_t) argc) ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]); DestroyMogrify(); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageInfo() applies image processing settings to the image as % prescribed by command line options. 
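%
%  A minimal, illustrative usage sketch (not part of the original source; the
%  variable names and the specific options shown are assumptions):
%
%    ExceptionInfo *exception = AcquireExceptionInfo();
%    ImageInfo *image_info = AcquireImageInfo();
%    const char *settings[] = { "-quality", "85", "-interlace", "Plane" };
%
%    (void) MogrifyImageInfo(image_info, 4, settings, exception);
%    CatchException(exception);
%    image_info = DestroyImageInfo(image_info);
%    exception = DestroyExceptionInfo(exception);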
% % The format of the MogrifyImageInfo method is: % % MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc, % const char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info, const int argc,const char **argv,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; ssize_t count; register ssize_t i; /* Initialize method variables. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (argc < 0) return(MagickTrue); /* Set the image settings. */ for (i=0; i < (ssize_t) argc; i++) { option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; switch (*(option+1)) { case 'a': { if (LocaleCompare("adjoin",option+1) == 0) { image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("antialias",option+1) == 0) { image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("attenuate",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("authenticate",option+1) == 0) { if (*option == '+') (void) CloneString(&image_info->authenticate,(char *) NULL); else (void) CloneString(&image_info->authenticate,argv[i+1]); break; } break; } case 'b': { if (LocaleCompare("background",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorDatabase(MogrifyBackgroundColor, &image_info->background_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(argv[i+1],&image_info->background_color, exception); break; } if (LocaleCompare("bias",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("black-point-compensation",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("blue-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("bordercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) QueryColorDatabase(MogrifyBorderColor, &image_info->border_color,exception); break; } (void) QueryColorDatabase(argv[i+1],&image_info->border_color, exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("box",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,"undercolor","none"); break; } (void) SetImageOption(image_info,"undercolor",argv[i+1]); break; } break; } case 'c': { if (LocaleCompare("cache",option+1) == 0) { MagickSizeType 
limit; limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+1]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0); (void) SetMagickResourceLimit(MemoryResource,limit); (void) SetMagickResourceLimit(MapResource,2*limit); break; } if (LocaleCompare("caption",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') { image_info->channel=DefaultChannels; break; } image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("colors",option+1) == 0) { image_info->colors=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("colorspace",option+1) == 0) { if (*option == '+') { image_info->colorspace=UndefinedColorspace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->colorspace=(ColorspaceType) ParseCommandOption( MagickColorspaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("comment",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("compress",option+1) == 0) { if (*option == '+') { image_info->compression=UndefinedCompression; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'd': { if (LocaleCompare("debug",option+1) == 0) { if (*option == '+') (void) SetLogEventMask("none"); else (void) SetLogEventMask(argv[i+1]); image_info->debug=IsEventLogging(); break; } if (LocaleCompare("define",option+1) == 0) { if (*option == '+') { if (LocaleNCompare(argv[i+1],"registry:",9) == 0) (void) DeleteImageRegistry(argv[i+1]+9); else (void) DeleteImageOption(image_info,argv[i+1]); break; } if (LocaleNCompare(argv[i+1],"registry:",9) == 0) { (void) DefineImageRegistry(StringRegistryType,argv[i+1]+9, exception); break; } (void) DefineImageOption(image_info,argv[i+1]); break; } if (LocaleCompare("delay",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("density",option+1) == 0) { /* Set image density. 
*/ if (*option == '+') { if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); (void) SetImageOption(image_info,option+1,"72"); break; } (void) CloneString(&image_info->density,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("depth",option+1) == 0) { if (*option == '+') { image_info->depth=MAGICKCORE_QUANTUM_DEPTH; break; } image_info->depth=StringToUnsignedLong(argv[i+1]); break; } if (LocaleCompare("direction",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("display",option+1) == 0) { if (*option == '+') { if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); break; } (void) CloneString(&image_info->server_name,argv[i+1]); break; } if (LocaleCompare("dispose",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { image_info->dither=MagickFalse; (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); image_info->dither=MagickTrue; break; } break; } case 'e': { if (LocaleCompare("encoding",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("endian",option+1) == 0) { if (*option == '+') { image_info->endian=UndefinedEndian; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->endian=(EndianType) ParseCommandOption( MagickEndianOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("extract",option+1) == 0) { /* Set image extract geometry. 
*/ if (*option == '+') { if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); break; } (void) CloneString(&image_info->extract,argv[i+1]); break; } break; } case 'f': { if (LocaleCompare("family",option+1) == 0) { if (*option != '+') (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fill",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("filter",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("font",option+1) == 0) { if (*option == '+') { if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); break; } (void) CloneString(&image_info->font,argv[i+1]); break; } if (LocaleCompare("format",option+1) == 0) { register const char *q; for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%')) if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL) image_info->ping=MagickFalse; (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("fuzz",option+1) == 0) { if (*option == '+') { image_info->fuzz=0.0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->fuzz=StringToDoubleInterval(argv[i+1],(double) QuantumRange+1.0); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'g': { if (LocaleCompare("gravity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("green-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'i': { if (LocaleCompare("intensity",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("intent",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interlace",option+1) == 0) { if (*option == '+') { image_info->interlace=UndefinedInterlace; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->interlace=(InterlaceType) ParseCommandOption( MagickInterlaceOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interline-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interpolate",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("interword-spacing",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'k': { if (LocaleCompare("kerning",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"undefined"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'l': { if 
(LocaleCompare("label",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("limit",option+1) == 0) { MagickSizeType limit; ResourceType type; if (*option == '+') break; type=(ResourceType) ParseCommandOption(MagickResourceOptions, MagickFalse,argv[i+1]); limit=MagickResourceInfinity; if (LocaleCompare("unlimited",argv[i+2]) != 0) limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0); (void) SetMagickResourceLimit(type,limit); break; } if (LocaleCompare("list",option+1) == 0) { ssize_t list; /* Display configuration list. */ list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]); switch (list) { case MagickCoderOptions: { (void) ListCoderInfo((FILE *) NULL,exception); break; } case MagickColorOptions: { (void) ListColorInfo((FILE *) NULL,exception); break; } case MagickConfigureOptions: { (void) ListConfigureInfo((FILE *) NULL,exception); break; } case MagickDelegateOptions: { (void) ListDelegateInfo((FILE *) NULL,exception); break; } case MagickFontOptions: { (void) ListTypeInfo((FILE *) NULL,exception); break; } case MagickFormatOptions: { (void) ListMagickInfo((FILE *) NULL,exception); break; } case MagickLocaleOptions: { (void) ListLocaleInfo((FILE *) NULL,exception); break; } case MagickLogOptions: { (void) ListLogInfo((FILE *) NULL,exception); break; } case MagickMagicOptions: { (void) ListMagicInfo((FILE *) NULL,exception); break; } case MagickMimeOptions: { (void) ListMimeInfo((FILE *) NULL,exception); break; } case MagickModuleOptions: { (void) ListModuleInfo((FILE *) NULL,exception); break; } case MagickPolicyOptions: { (void) ListPolicyInfo((FILE *) NULL,exception); break; } case MagickResourceOptions: { (void) ListMagickResourceInfo((FILE *) NULL,exception); break; } case MagickThresholdOptions: { (void) ListThresholdMaps((FILE *) NULL,exception); break; } default: { (void) ListCommandOptions((FILE *) NULL,(CommandOption) list, exception); break; } } break; } if (LocaleCompare("log",option+1) == 0) { if (*option == '+') break; (void) SetLogFormat(argv[i+1]); break; } if (LocaleCompare("loop",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'm': { if (LocaleCompare("matte",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("mattecolor",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(MogrifyMatteColor, &image_info->matte_color,exception); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); (void) QueryColorDatabase(argv[i+1],&image_info->matte_color, exception); break; } if (LocaleCompare("metric",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("monitor",option+1) == 0) { (void) SetImageInfoProgressMonitor(image_info,MonitorProgress, (void *) NULL); break; } if (LocaleCompare("monochrome",option+1) == 0) { image_info->monochrome=(*option == '-') ? 
MagickTrue : MagickFalse; break; } break; } case 'o': { if (LocaleCompare("orient",option+1) == 0) { if (*option == '+') { image_info->orientation=UndefinedOrientation; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } } case 'p': { if (LocaleCompare("page",option+1) == 0) { char *canonical_page, page[MaxTextExtent]; const char *image_option; MagickStatusType flags; RectangleInfo geometry; if (*option == '+') { (void) DeleteImageOption(image_info,option+1); (void) CloneString(&image_info->page,(char *) NULL); break; } (void) memset(&geometry,0,sizeof(geometry)); image_option=GetImageOption(image_info,"page"); if (image_option != (const char *) NULL) (void) ParseAbsoluteGeometry(image_option,&geometry); canonical_page=GetPageGeometry(argv[i+1]); flags=ParseAbsoluteGeometry(canonical_page,&geometry); canonical_page=DestroyString(canonical_page); (void) FormatLocaleString(page,MaxTextExtent,"%lux%lu", (unsigned long) geometry.width,(unsigned long) geometry.height); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) (void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld", (unsigned long) geometry.width,(unsigned long) geometry.height, (long) geometry.x,(long) geometry.y); (void) SetImageOption(image_info,option+1,page); (void) CloneString(&image_info->page,page); break; } if (LocaleCompare("pen",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"none"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("ping",option+1) == 0) { image_info->ping=(*option == '-') ? MagickTrue : MagickFalse; break; } if (LocaleCompare("pointsize",option+1) == 0) { if (*option == '+') geometry_info.rho=0.0; else (void) ParseGeometry(argv[i+1],&geometry_info); image_info->pointsize=geometry_info.rho; break; } if (LocaleCompare("precision",option+1) == 0) { (void) SetMagickPrecision(StringToInteger(argv[i+1])); break; } if (LocaleCompare("preview",option+1) == 0) { /* Preview image. */ if (*option == '+') { image_info->preview_type=UndefinedPreview; break; } image_info->preview_type=(PreviewType) ParseCommandOption( MagickPreviewOptions,MagickFalse,argv[i+1]); break; } break; } case 'q': { if (LocaleCompare("quality",option+1) == 0) { /* Set image compression quality. */ if (*option == '+') { image_info->quality=UndefinedCompressionQuality; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->quality=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("quiet",option+1) == 0) { static WarningHandler warning_handler = (WarningHandler) NULL; if (*option == '+') { /* Restore error or warning messages. */ warning_handler=SetWarningHandler(warning_handler); break; } /* Suppress error or warning messages. */ warning_handler=SetWarningHandler((WarningHandler) NULL); break; } break; } case 'r': { if (LocaleCompare("red-primary",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 's': { if (LocaleCompare("sampling-factor",option+1) == 0) { /* Set image sampling factor. 
*/ if (*option == '+') { if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); break; } (void) CloneString(&image_info->sampling_factor,argv[i+1]); break; } if (LocaleCompare("scene",option+1) == 0) { /* Set image scene. */ if (*option == '+') { image_info->scene=0; (void) SetImageOption(image_info,option+1,"0"); break; } image_info->scene=StringToUnsignedLong(argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("seed",option+1) == 0) { unsigned long seed; if (*option == '+') { seed=(unsigned long) time((time_t *) NULL); SetRandomSecretKey(seed); break; } seed=StringToUnsignedLong(argv[i+1]); SetRandomSecretKey(seed); break; } if (LocaleCompare("size",option+1) == 0) { if (*option == '+') { if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); break; } (void) CloneString(&image_info->size,argv[i+1]); break; } if (LocaleCompare("stroke",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"none"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("strokewidth",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("style",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"none"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("synchronize",option+1) == 0) { if (*option == '+') { image_info->synchronize=MagickFalse; break; } image_info->synchronize=MagickTrue; break; } break; } case 't': { if (LocaleCompare("taint",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"false"); break; } (void) SetImageOption(image_info,option+1,"true"); break; } if (LocaleCompare("texture",option+1) == 0) { if (*option == '+') { if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); break; } (void) CloneString(&image_info->texture,argv[i+1]); break; } if (LocaleCompare("tile-offset",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("transparent-color",option+1) == 0) { if (*option == '+') { (void) QueryColorDatabase("none",&image_info->transparent_color, exception); (void) SetImageOption(image_info,option+1,"none"); break; } (void) QueryColorDatabase(argv[i+1],&image_info->transparent_color, exception); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("type",option+1) == 0) { if (*option == '+') { image_info->type=UndefinedType; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions, MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'u': { if (LocaleCompare("undercolor",option+1) == 0) { if (*option == '+') { (void) DeleteImageOption(image_info,option+1); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("units",option+1) == 0) { if (*option == '+') { image_info->units=UndefinedResolution; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->units=(ResolutionType) ParseCommandOption( MagickResolutionOptions,MagickFalse,argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 
'v': { if (LocaleCompare("verbose",option+1) == 0) { if (*option == '+') { image_info->verbose=MagickFalse; break; } image_info->verbose=MagickTrue; image_info->ping=MagickFalse; break; } if (LocaleCompare("view",option+1) == 0) { if (*option == '+') { if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); break; } (void) CloneString(&image_info->view,argv[i+1]); break; } if (LocaleCompare("virtual-pixel",option+1) == 0) { if (*option == '+') { image_info->virtual_pixel_method=UndefinedVirtualPixelMethod; (void) SetImageOption(image_info,option+1,"undefined"); break; } image_info->virtual_pixel_method=(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse, argv[i+1]); (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } case 'w': { if (LocaleCompare("weight",option+1) == 0) { if (*option == '+') (void) SetImageOption(image_info,option+1,"0"); else (void) SetImageOption(image_info,option+1,argv[i+1]); break; } if (LocaleCompare("white-point",option+1) == 0) { if (*option == '+') { (void) SetImageOption(image_info,option+1,"0.0"); break; } (void) SetImageOption(image_info,option+1,argv[i+1]); break; } break; } default: break; } i+=count; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImageList() applies any command line options that might affect the % entire image list (e.g. -append, -coalesce, etc.). % % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc, % const char **argv,Image **images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info, const int argc,const char **argv,Image **images,ExceptionInfo *exception) { ChannelType channel; const char *option; ImageInfo *mogrify_info; MagickStatusType status; QuantizeInfo *quantize_info; register ssize_t i; ssize_t count, index; /* Apply options to the image list. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image **) NULL); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); mogrify_info=CloneImageInfo(image_info); quantize_info=AcquireQuantizeInfo(mogrify_info); channel=mogrify_info->channel; status=MagickTrue; for (i=0; i < (ssize_t) argc; i++) { if (*images == (Image *) NULL) break; option=argv[i]; if (IsCommandOption(option) == MagickFalse) continue; count=ParseCommandOption(MagickCommandOptions,MagickFalse,option); count=MagickMax(count,0L); if ((i+count) >= (ssize_t) argc) break; status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception); switch (*(option+1)) { case 'a': { if (LocaleCompare("affinity",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL); InheritException(exception,&(*images)->exception); break; } i++; break; } if (LocaleCompare("append",option+1) == 0) { Image *append_image; (void) SyncImagesSettings(mogrify_info,*images); append_image=AppendImages(*images,*option == '-' ? MagickTrue : MagickFalse,exception); if (append_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=append_image; break; } if (LocaleCompare("average",option+1) == 0) { Image *average_image; /* Average an image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images); average_image=EvaluateImages(*images,MeanEvaluateOperator, exception); if (average_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=average_image; break; } break; } case 'c': { if (LocaleCompare("channel",option+1) == 0) { if (*option == '+') { channel=DefaultChannels; break; } channel=(ChannelType) ParseChannelOption(argv[i+1]); break; } if (LocaleCompare("clut",option+1) == 0) { Image *clut_image, *image; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); clut_image=RemoveFirstImageFromList(images); if (clut_image == (Image *) NULL) { status=MagickFalse; break; } (void) ClutImageChannel(image,channel,clut_image); clut_image=DestroyImage(clut_image); InheritException(exception,&image->exception); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("coalesce",option+1) == 0) { Image *coalesce_image; (void) SyncImagesSettings(mogrify_info,*images); coalesce_image=CoalesceImages(*images,exception); if (coalesce_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=coalesce_image; break; } if (LocaleCompare("combine",option+1) == 0) { Image *combine_image; (void) SyncImagesSettings(mogrify_info,*images); combine_image=CombineImages(*images,channel,exception); if (combine_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=combine_image; break; } if (LocaleCompare("compare",option+1) == 0) { const char *option; double distortion; Image *difference_image, *image, *reconstruct_image; MetricType metric; /* Mathematically and visually annotate the difference between an image and its reconstruction. 
*/ (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); reconstruct_image=RemoveFirstImageFromList(images); if (reconstruct_image == (Image *) NULL) { status=MagickFalse; break; } metric=UndefinedMetric; option=GetImageOption(image_info,"metric"); if (option != (const char *) NULL) metric=(MetricType) ParseCommandOption(MagickMetricOptions, MagickFalse,option); difference_image=CompareImageChannels(image,reconstruct_image, channel,metric,&distortion,exception); if (difference_image == (Image *) NULL) break; if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=difference_image; break; } if (LocaleCompare("complex",option+1) == 0) { ComplexOperator op; Image *complex_images; (void) SyncImageSettings(mogrify_info,*images); op=(ComplexOperator) ParseCommandOption(MagickComplexOptions, MagickFalse,argv[i+1]); complex_images=ComplexImages(*images,op,exception); if (complex_images == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=complex_images; break; } if (LocaleCompare("composite",option+1) == 0) { Image *mask_image, *composite_image, *image; RectangleInfo geometry; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); composite_image=RemoveFirstImageFromList(images); if (composite_image == (Image *) NULL) { status=MagickFalse; break; } (void) TransformImage(&composite_image,(char *) NULL, composite_image->geometry); SetGeometry(composite_image,&geometry); (void) ParseAbsoluteGeometry(composite_image->geometry,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity, &geometry); mask_image=RemoveFirstImageFromList(images); if (mask_image != (Image *) NULL) { if ((image->compose == DisplaceCompositeOp) || (image->compose == DistortCompositeOp)) { /* Merge Y displacement into X displacement image. */ (void) CompositeImage(composite_image,CopyGreenCompositeOp, mask_image,0,0); mask_image=DestroyImage(mask_image); } else { /* Set a blending mask for the composition. */ if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=mask_image; (void) NegateImage(image->mask,MagickFalse); } } (void) CompositeImageChannel(image,channel,image->compose, composite_image,geometry.x,geometry.y); if (mask_image != (Image *) NULL) { image->mask=DestroyImage(image->mask); mask_image=image->mask; } composite_image=DestroyImage(composite_image); InheritException(exception,&image->exception); *images=DestroyImageList(*images); *images=image; break; } if (LocaleCompare("copy",option+1) == 0) { Image *source_image; OffsetInfo offset; RectangleInfo geometry; /* Copy image pixels. 
*/ (void) SyncImageSettings(mogrify_info,*images); (void) ParsePageGeometry(*images,argv[i+2],&geometry,exception); offset.x=geometry.x; offset.y=geometry.y; source_image=(*images); if (source_image->next != (Image *) NULL) source_image=source_image->next; (void) ParsePageGeometry(source_image,argv[i+1],&geometry, exception); status=CopyImagePixels(*images,source_image,&geometry,&offset, exception); break; } break; } case 'd': { if (LocaleCompare("deconstruct",option+1) == 0) { Image *deconstruct_image; (void) SyncImagesSettings(mogrify_info,*images); deconstruct_image=DeconstructImages(*images,exception); if (deconstruct_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=deconstruct_image; break; } if (LocaleCompare("delete",option+1) == 0) { if (*option == '+') DeleteImages(images,"-1",exception); else DeleteImages(images,argv[i+1],exception); break; } if (LocaleCompare("dither",option+1) == 0) { if (*option == '+') { quantize_info->dither=MagickFalse; break; } quantize_info->dither=MagickTrue; quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,argv[i+1]); break; } if (LocaleCompare("duplicate",option+1) == 0) { Image *duplicate_images; if (*option == '+') duplicate_images=DuplicateImages(*images,1,"-1",exception); else { const char *p; size_t number_duplicates; number_duplicates=(size_t) StringToLong(argv[i+1]); p=strchr(argv[i+1],','); if (p == (const char *) NULL) duplicate_images=DuplicateImages(*images,number_duplicates, "-1",exception); else duplicate_images=DuplicateImages(*images,number_duplicates,p, exception); } AppendImageToList(images, duplicate_images); (void) SyncImagesSettings(mogrify_info,*images); break; } break; } case 'e': { if (LocaleCompare("evaluate-sequence",option+1) == 0) { Image *evaluate_image; MagickEvaluateOperator op; (void) SyncImageSettings(mogrify_info,*images); op=(MagickEvaluateOperator) ParseCommandOption( MagickEvaluateOptions,MagickFalse,argv[i+1]); evaluate_image=EvaluateImages(*images,op,exception); if (evaluate_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=evaluate_image; break; } break; } case 'f': { if (LocaleCompare("fft",option+1) == 0) { Image *fourier_image; /* Implements the discrete Fourier transform (DFT). */ (void) SyncImageSettings(mogrify_info,*images); fourier_image=ForwardFourierTransformImage(*images,*option == '-' ? 
MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("flatten",option+1) == 0) { Image *flatten_image; (void) SyncImagesSettings(mogrify_info,*images); flatten_image=MergeImageLayers(*images,FlattenLayer,exception); if (flatten_image == (Image *) NULL) break; *images=DestroyImageList(*images); *images=flatten_image; break; } if (LocaleCompare("fx",option+1) == 0) { Image *fx_image; (void) SyncImagesSettings(mogrify_info,*images); fx_image=FxImageChannel(*images,channel,argv[i+1],exception); if (fx_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=fx_image; break; } break; } case 'h': { if (LocaleCompare("hald-clut",option+1) == 0) { Image *hald_image, *image; (void) SyncImagesSettings(mogrify_info,*images); image=RemoveFirstImageFromList(images); hald_image=RemoveFirstImageFromList(images); if (hald_image == (Image *) NULL) { status=MagickFalse; break; } (void) HaldClutImageChannel(image,channel,hald_image); hald_image=DestroyImage(hald_image); InheritException(exception,&image->exception); if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=image; break; } break; } case 'i': { if (LocaleCompare("ift",option+1) == 0) { Image *fourier_image, *magnitude_image, *phase_image; /* Implements the inverse fourier discrete Fourier transform (DFT). */ (void) SyncImagesSettings(mogrify_info,*images); magnitude_image=RemoveFirstImageFromList(images); phase_image=RemoveFirstImageFromList(images); if (phase_image == (Image *) NULL) { status=MagickFalse; break; } fourier_image=InverseFourierTransformImage(magnitude_image, phase_image,*option == '-' ? MagickTrue : MagickFalse,exception); if (fourier_image == (Image *) NULL) break; if (*images != (Image *) NULL) *images=DestroyImageList(*images); *images=fourier_image; break; } if (LocaleCompare("insert",option+1) == 0) { Image *p, *q; index=0; if (*option != '+') index=(ssize_t) StringToLong(argv[i+1]); p=RemoveLastImageFromList(images); if (p == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } q=p; if (index == 0) PrependImageToList(images,q); else if (index == (ssize_t) GetImageListLength(*images)) AppendImageToList(images,q); else { q=GetImageFromList(*images,index-1); if (q == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",argv[i+1]); status=MagickFalse; break; } InsertImageInList(&q,p); } *images=GetFirstImageInList(q); break; } break; } case 'l': { if (LocaleCompare("layers",option+1) == 0) { Image *layers; ImageLayerMethod method; (void) SyncImagesSettings(mogrify_info,*images); layers=(Image *) NULL; method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions, MagickFalse,argv[i+1]); switch (method) { case CoalesceLayer: { layers=CoalesceImages(*images,exception); break; } case CompareAnyLayer: case CompareClearLayer: case CompareOverlayLayer: default: { layers=CompareImageLayers(*images,method,exception); break; } case MergeLayer: case FlattenLayer: case MosaicLayer: case TrimBoundsLayer: { layers=MergeImageLayers(*images,method,exception); break; } case DisposeLayer: { layers=DisposeImages(*images,exception); break; } case OptimizeImageLayer: { layers=OptimizeImageLayers(*images,exception); break; } case OptimizePlusLayer: { layers=OptimizePlusImageLayers(*images,exception); break; } case 
OptimizeTransLayer: { OptimizeImageTransparency(*images,exception); break; } case RemoveDupsLayer: { RemoveDuplicateLayers(images,exception); break; } case RemoveZeroLayer: { RemoveZeroDelayLayers(images,exception); break; } case OptimizeLayer: { /* General Purpose, GIF Animation Optimizer. */ layers=CoalesceImages(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; layers=OptimizeImageLayers(*images,exception); if (layers == (Image *) NULL) { status=MagickFalse; break; } InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; layers=(Image *) NULL; OptimizeImageTransparency(*images,exception); InheritException(exception,&(*images)->exception); (void) RemapImages(quantize_info,*images,(Image *) NULL); break; } case CompositeLayer: { CompositeOperator compose; Image *source; RectangleInfo geometry; /* Split image sequence at the first 'NULL:' image. */ source=(*images); while (source != (Image *) NULL) { source=GetNextImageInList(source); if ((source != (Image *) NULL) && (LocaleCompare(source->magick,"NULL") == 0)) break; } if (source != (Image *) NULL) { if ((GetPreviousImageInList(source) == (Image *) NULL) || (GetNextImageInList(source) == (Image *) NULL)) source=(Image *) NULL; else { /* Separate the two lists, junk the null: image. */ source=SplitImageList(source->previous); DeleteImageFromList(&source); } } if (source == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"MissingNullSeparator","layers Composite"); status=MagickFalse; break; } /* Adjust offset with gravity and virtual canvas. */ SetGeometry(*images,&geometry); (void) ParseAbsoluteGeometry((*images)->geometry,&geometry); geometry.width=source->page.width != 0 ? source->page.width : source->columns; geometry.height=source->page.height != 0 ? source->page.height : source->rows; GravityAdjustGeometry((*images)->page.width != 0 ? (*images)->page.width : (*images)->columns, (*images)->page.height != 0 ? (*images)->page.height : (*images)->rows,(*images)->gravity,&geometry); compose=OverCompositeOp; option=GetImageOption(mogrify_info,"compose"); if (option != (const char *) NULL) compose=(CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,option); CompositeLayers(*images,compose,source,geometry.x,geometry.y, exception); source=DestroyImageList(source); break; } } if (layers == (Image *) NULL) break; InheritException(exception,&layers->exception); *images=DestroyImageList(*images); *images=layers; break; } break; } case 'm': { if (LocaleCompare("map",option+1) == 0) { (void) SyncImagesSettings(mogrify_info,*images); if (*option == '+') { (void) RemapImages(quantize_info,*images,(Image *) NULL); InheritException(exception,&(*images)->exception); break; } i++; break; } if (LocaleCompare("maximum",option+1) == 0) { Image *maximum_image; /* Maximum image sequence (deprecated). */ (void) SyncImagesSettings(mogrify_info,*images); maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception); if (maximum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=maximum_image; break; } if (LocaleCompare("minimum",option+1) == 0) { Image *minimum_image; /* Minimum image sequence (deprecated). 
*/ (void) SyncImagesSettings(mogrify_info,*images); minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception); if (minimum_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=minimum_image; break; } if (LocaleCompare("morph",option+1) == 0) { Image *morph_image; (void) SyncImagesSettings(mogrify_info,*images); morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]), exception); if (morph_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=morph_image; break; } if (LocaleCompare("mosaic",option+1) == 0) { Image *mosaic_image; (void) SyncImagesSettings(mogrify_info,*images); mosaic_image=MergeImageLayers(*images,MosaicLayer,exception); if (mosaic_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=mosaic_image; break; } break; } case 'p': { if (LocaleCompare("poly",option+1) == 0) { char *args, token[MaxTextExtent]; const char *p; double *arguments; Image *polynomial_image; register ssize_t x; size_t number_arguments; /* Polynomial image. */ (void) SyncImageSettings(mogrify_info,*images); args=InterpretImageProperties(mogrify_info,*images,argv[i+1]); InheritException(exception,&(*images)->exception); if (args == (char *) NULL) break; p=(char *) args; for (x=0; *p != '\0'; x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } number_arguments=(size_t) x; arguments=(double *) AcquireQuantumMemory(number_arguments, sizeof(*arguments)); if (arguments == (double *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed",(*images)->filename); (void) memset(arguments,0,number_arguments* sizeof(*arguments)); p=(char *) args; for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arguments[x]=StringToDouble(token,(char **) NULL); } args=DestroyString(args); polynomial_image=PolynomialImageChannel(*images,channel, number_arguments >> 1,arguments,exception); arguments=(double *) RelinquishMagickMemory(arguments); if (polynomial_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=polynomial_image; break; } if (LocaleCompare("print",option+1) == 0) { char *string; (void) SyncImagesSettings(mogrify_info,*images); string=InterpretImageProperties(mogrify_info,*images,argv[i+1]); if (string == (char *) NULL) break; InheritException(exception,&(*images)->exception); (void) FormatLocaleFile(stdout,"%s",string); string=DestroyString(string); } if (LocaleCompare("process",option+1) == 0) { char **arguments; int j, number_arguments; (void) SyncImagesSettings(mogrify_info,*images); arguments=StringToArgv(argv[i+1],&number_arguments); if (arguments == (char **) NULL) break; if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL)) { char breaker, quote, *token; const char *arguments; int next, status; size_t length; TokenInfo *token_info; /* Support old style syntax, filter="-option arg". 
*/ length=strlen(argv[i+1]); token=(char *) NULL; if (~length >= (MaxTextExtent-1)) token=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*token)); if (token == (char *) NULL) break; next=0; arguments=argv[i+1]; token_info=AcquireTokenInfo(); status=Tokenizer(token_info,0,token,length,arguments,"","=", "\"",'\0',&breaker,&next,&quote); token_info=DestroyTokenInfo(token_info); if (status == 0) { const char *argv; argv=(&(arguments[next])); (void) InvokeDynamicImageFilter(token,&(*images),1,&argv, exception); } token=DestroyString(token); break; } (void) SubstituteString(&arguments[1],"-",""); (void) InvokeDynamicImageFilter(arguments[1],&(*images), number_arguments-2,(const char **) arguments+2,exception); for (j=0; j < number_arguments; j++) arguments[j]=DestroyString(arguments[j]); arguments=(char **) RelinquishMagickMemory(arguments); break; } break; } case 'r': { if (LocaleCompare("reverse",option+1) == 0) { ReverseImageList(images); InheritException(exception,&(*images)->exception); break; } break; } case 's': { if (LocaleCompare("smush",option+1) == 0) { Image *smush_image; ssize_t offset; (void) SyncImagesSettings(mogrify_info,*images); offset=(ssize_t) StringToLong(argv[i+1]); smush_image=SmushImages(*images,*option == '-' ? MagickTrue : MagickFalse,offset,exception); if (smush_image == (Image *) NULL) { status=MagickFalse; break; } *images=DestroyImageList(*images); *images=smush_image; break; } if (LocaleCompare("swap",option+1) == 0) { Image *p, *q, *u, *v; ssize_t swap_index; index=(-1); swap_index=(-2); if (*option != '+') { GeometryInfo geometry_info; MagickStatusType flags; swap_index=(-1); flags=ParseGeometry(argv[i+1],&geometry_info); index=(ssize_t) geometry_info.rho; if ((flags & SigmaValue) != 0) swap_index=(ssize_t) geometry_info.sigma; } p=GetImageFromList(*images,index); q=GetImageFromList(*images,swap_index); if ((p == (Image *) NULL) || (q == (Image *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"NoSuchImage","`%s'",(*images)->filename); status=MagickFalse; break; } if (p == q) break; u=CloneImage(p,0,0,MagickTrue,exception); if (u == (Image *) NULL) break; v=CloneImage(q,0,0,MagickTrue,exception); if (v == (Image *) NULL) { u=DestroyImage(u); break; } ReplaceImageInList(&p,v); ReplaceImageInList(&q,u); *images=GetFirstImageInList(q); break; } break; } case 'w': { if (LocaleCompare("write",option+1) == 0) { char key[MaxTextExtent]; Image *write_images; ImageInfo *write_info; (void) SyncImagesSettings(mogrify_info,*images); (void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]); (void) DeleteImageRegistry(key); write_images=(*images); if (*option == '+') write_images=CloneImageList(*images,exception); write_info=CloneImageInfo(mogrify_info); status&=WriteImages(write_info,write_images,argv[i+1],exception); write_info=DestroyImageInfo(write_info); if (*option == '+') write_images=DestroyImageList(write_images); break; } break; } default: break; } i+=count; } quantize_info=DestroyQuantizeInfo(quantize_info); mogrify_info=DestroyImageInfo(mogrify_info); status&=MogrifyImageInfo(image_info,argc,argv,exception); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M o g r i f y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MogrifyImages() applies image processing options to a sequence of images as % prescribed by command line options. 
% % The format of the MogrifyImage method is: % % MagickBooleanType MogrifyImages(ImageInfo *image_info, % const MagickBooleanType post,const int argc,const char **argv, % Image **images,Exceptioninfo *exception) % % A description of each parameter follows: % % o image_info: the image info.. % % o post: If true, post process image list operators otherwise pre-process. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o images: pointer to a pointer of the first image in image list. % % o exception: return any errors or warnings in this structure. % */ WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info, const MagickBooleanType post,const int argc,const char **argv, Image **images,ExceptionInfo *exception) { #define MogrifyImageTag "Mogrify/Image" MagickStatusType status; MagickBooleanType proceed; size_t n; register ssize_t i; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (images == (Image **) NULL) return(MogrifyImage(image_info,argc,argv,images,exception)); assert((*images)->previous == (Image *) NULL); assert((*images)->signature == MagickCoreSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); if ((argc <= 0) || (*argv == (char *) NULL)) return(MagickTrue); (void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL, (void *) NULL); status=MagickTrue; #if 0 (void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc, post?"post":"pre"); #endif /* Pre-process multi-image sequence operators */ if (post == MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); /* For each image, process simple single image operators */ i=0; n=GetImageListLength(*images); for (;;) { #if 0 (void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif status&=MogrifyImage(image_info,argc,argv,images,exception); proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i, n); if (proceed == MagickFalse) break; if ((*images)->next == (Image *) NULL) break; *images=(*images)->next; i++; } assert(*images != (Image *) NULL); #if 0 (void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long) GetImageIndexInList(*images),(long)GetImageListLength(*images)); #endif /* Post-process, multi-image sequence operators */ *images=GetFirstImageInList(*images); if (post != MagickFalse) status&=MogrifyImageList(image_info,argc,argv,images,exception); return(status != 0 ? MagickTrue : MagickFalse); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_945_0
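The "-limit" branch in MogrifyImageInfo() above turns an SI-prefixed argument into a MagickSizeType with SiPrefixToDoubleInterval() and hands it to SetMagickResourceLimit(), which is how the command line caps memory, map, and disk use before any pixels are decoded. Below is a minimal standalone sketch of the same idea, assuming an ImageMagick 6 MagickCore development install; the <magick/MagickCore.h> header path, the 256 MiB and 1 GiB figures, and the filename handling are illustrative assumptions, not taken from the sample above.

/*
  Sketch: cap resource use with SetMagickResourceLimit() before decoding an
  untrusted image, mirroring "-limit memory 256MiB -limit disk 1GiB".
*/
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  if (argc != 2)
    {
      (void) fprintf(stderr,"usage: %s image\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(*argv,MagickFalse);
  (void) SetMagickResourceLimit(MemoryResource,256UL*1024UL*1024UL);
  (void) SetMagickResourceLimit(DiskResource,1024UL*1024UL*1024UL);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,argv[1],MaxTextExtent);
  image=ReadImage(image_info,exception);  /* decoding runs under the limits above */
  if (image == (Image *) NULL)
    CatchException(exception);
  else
    image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}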
crossvul-cpp_data_bad_4815_2
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L AAA BBBB EEEEE L % % L A A B B E L % % L AAAAA BBBB EEE L % % L A A B B E L % % LLLLL A A BBBB EEEEE LLLLL % % % % % % Read ASCII String As An Image. % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/draw.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel-accessor.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d L A B E L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadLABELImage() reads a LABEL image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadLABELImage method is: % % Image *ReadLABELImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadLABELImage(const ImageInfo *image_info, ExceptionInfo *exception) { char geometry[MaxTextExtent], *property; const char *label; DrawInfo *draw_info; Image *image; MagickBooleanType status; TypeMetric metrics; size_t height, width; /* Initialize Image structure. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); (void) ResetImagePage(image,"0x0+0+0"); property=InterpretImageProperties(image_info,image,image_info->filename); (void) SetImageProperty(image,"label",property); property=DestroyString(property); label=GetImageProperty(image,"label"); draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->text=ConstantString(label); metrics.width=0; metrics.ascent=0.0; status=GetMultilineTypeMetrics(image,draw_info,&metrics); if ((image->columns == 0) && (image->rows == 0)) { image->columns=(size_t) floor(metrics.width+draw_info->stroke_width+0.5); image->rows=(size_t) floor(metrics.height+draw_info->stroke_width+0.5); } else if ((strlen(label) > 0) && (((image->columns == 0) || (image->rows == 0)) || (fabs(image_info->pointsize) < MagickEpsilon))) { double high, low; /* Auto fit text into bounding box. */ for ( ; ; draw_info->pointsize*=2.0) { (void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g", -metrics.bounds.x1,metrics.ascent); if (draw_info->gravity == UndefinedGravity) (void) CloneString(&draw_info->geometry,geometry); (void) GetMultilineTypeMetrics(image,draw_info,&metrics); width=(size_t) floor(metrics.width+draw_info->stroke_width+0.5); height=(size_t) floor(metrics.height+draw_info->stroke_width+0.5); if ((image->columns != 0) && (image->rows != 0)) { if ((width >= image->columns) && (height >= image->rows)) break; } else if (((image->columns != 0) && (width >= image->columns)) || ((image->rows != 0) && (height >= image->rows))) break; } high=draw_info->pointsize; for (low=1.0; (high-low) > 0.5; ) { draw_info->pointsize=(low+high)/2.0; (void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g", -metrics.bounds.x1,metrics.ascent); if (draw_info->gravity == UndefinedGravity) (void) CloneString(&draw_info->geometry,geometry); (void) GetMultilineTypeMetrics(image,draw_info,&metrics); width=(size_t) floor(metrics.width+draw_info->stroke_width+0.5); height=(size_t) floor(metrics.height+draw_info->stroke_width+0.5); if ((image->columns != 0) && (image->rows != 0)) { if ((width < image->columns) && (height < image->rows)) low=draw_info->pointsize+0.5; else high=draw_info->pointsize-0.5; } else if (((image->columns != 0) && (width < image->columns)) || ((image->rows != 0) && (height < image->rows))) low=draw_info->pointsize+0.5; else high=draw_info->pointsize-0.5; } draw_info->pointsize=(low+high)/2.0-0.5; } status=GetMultilineTypeMetrics(image,draw_info,&metrics); if (status == MagickFalse) { draw_info=DestroyDrawInfo(draw_info); InheritException(exception,&image->exception); image=DestroyImageList(image); return((Image *) NULL); } if (image->columns == 0) image->columns=(size_t) floor(metrics.width+draw_info->stroke_width+0.5); if (image->columns == 0) image->columns=(size_t) floor(draw_info->pointsize+draw_info->stroke_width+ 0.5); if (image->rows == 0) image->rows=(size_t) floor(metrics.ascent-metrics.descent+ draw_info->stroke_width+0.5); if (image->rows == 0) image->rows=(size_t) floor(draw_info->pointsize+draw_info->stroke_width+ 0.5); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { draw_info=DestroyDrawInfo(draw_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if 
(SetImageBackgroundColor(image) == MagickFalse) { draw_info=DestroyDrawInfo(draw_info); InheritException(exception,&image->exception); image=DestroyImageList(image); return((Image *) NULL); } /* Draw label. */ (void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g", draw_info->direction == RightToLeftDirection ? image->columns- metrics.bounds.x2 : 0.0,draw_info->gravity == UndefinedGravity ? metrics.ascent : 0.0); draw_info->geometry=AcquireString(geometry); status=AnnotateImage(image,draw_info); if (image_info->pointsize == 0.0) { char pointsize[MaxTextExtent]; (void) FormatLocaleString(pointsize,MaxTextExtent,"%.20g", draw_info->pointsize); (void) SetImageProperty(image,"label:pointsize",pointsize); } draw_info=DestroyDrawInfo(draw_info); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r L A B E L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterLABELImage() adds properties for the LABEL image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterLABELImage method is: % % size_t RegisterLABELImage(void) % */ ModuleExport size_t RegisterLABELImage(void) { MagickInfo *entry; entry=SetMagickInfo("LABEL"); entry->decoder=(DecodeImageHandler *) ReadLABELImage; entry->adjoin=MagickFalse; entry->format_type=ImplicitFormatType; entry->description=ConstantString("Image label"); entry->module=ConstantString("LABEL"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r L A B E L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterLABELImage() removes format registrations made by the % LABEL module from the list of supported formats. % % The format of the UnregisterLABELImage method is: % % UnregisterLABELImage(void) % */ ModuleExport void UnregisterLABELImage(void) { (void) UnregisterMagickInfo("LABEL"); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_4815_2
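ReadLABELImage() above auto-fits a label to a fixed box in two phases: it doubles draw_info->pointsize until the rendered metrics overflow the requested geometry, then bisects between the last fitting and first overflowing size. The self-contained sketch below shows that grow-then-bisect search, with measure_width() standing in for GetMultilineTypeMetrics(); the linear cost model and the 640-pixel target are invented purely for illustration.

/*
  Sketch of the grow-then-bisect pointsize search used by ReadLABELImage().
*/
#include <stdio.h>

static double measure_width(double pointsize)
{
  return(7.3*pointsize);  /* stand-in for real font metrics */
}

static double fit_pointsize(double target_width)
{
  double
    high,
    low,
    pointsize;

  /* Phase 1: double the size until the text no longer fits. */
  for (pointsize=1.0; measure_width(pointsize) < target_width; )
    pointsize*=2.0;
  /* Phase 2: bisect between the last fitting and first overflowing size. */
  high=pointsize;
  for (low=1.0; (high-low) > 0.5; )
  {
    pointsize=(low+high)/2.0;
    if (measure_width(pointsize) < target_width)
      low=pointsize+0.5;
    else
      high=pointsize-0.5;
  }
  return((low+high)/2.0-0.5);
}

int main(void)
{
  double
    pointsize;

  pointsize=fit_pointsize(640.0);
  (void) printf("pointsize %.1f gives width %.1f (target 640)\n",pointsize,
    measure_width(pointsize));
  return(0);
}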
crossvul-cpp_data_bad_1716_0
/* * Performance counter callchain support - powerpc architecture code * * Copyright © 2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/pgtable.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include <asm/vdso.h> #ifdef CONFIG_PPC64 #include "../kernel/ppc32.h" #endif /* * Is sp valid as the address of the next kernel stack frame after prev_sp? * The next frame may be in a different stack area but should not go * back down in the same stack area. */ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) { if (sp & 0xf) return 0; /* must be 16-byte aligned */ if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) return 0; if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) return 1; /* * sp could decrease when we jump off an interrupt stack * back to the regular process stack. */ if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1))) return 1; return 0; } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long sp, next_sp; unsigned long next_ip; unsigned long lr; long level = 0; unsigned long *fp; lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, perf_instruction_pointer(regs)); if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) return; for (;;) { fp = (unsigned long *) sp; next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an * interrupt that occurred in the kernel */ regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD); next_ip = regs->nip; lr = regs->link; level = 0; perf_callchain_store(entry, PERF_CONTEXT_KERNEL); } else { if (level == 0) next_ip = lr; else next_ip = fp[STACK_FRAME_LR_SAVE]; /* * We can't tell which of the first two addresses * we get are valid, but we can filter out the * obviously bogus ones here. We replace them * with 0 rather than removing them entirely so * that userspace can tell which is which. */ if ((level == 1 && next_ip == lr) || (level <= 1 && !kernel_text_address(next_ip))) next_ip = 0; ++level; } perf_callchain_store(entry, next_ip); if (!valid_next_sp(next_sp, sp)) return; sp = next_sp; } } #ifdef CONFIG_PPC64 /* * On 64-bit we don't want to invoke hash_page on user addresses from * interrupt context, so if the access faults, we read the page tables * to find which page (if any) is mapped and access it directly. 
*/ static int read_user_stack_slow(void __user *ptr, void *ret, int nb) { pgd_t *pgdir; pte_t *ptep, pte; unsigned shift; unsigned long addr = (unsigned long) ptr; unsigned long offset; unsigned long pfn; void *kaddr; pgdir = current->mm->pgd; if (!pgdir) return -EFAULT; ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift); if (!shift) shift = PAGE_SHIFT; /* align address to page boundary */ offset = addr & ((1UL << shift) - 1); addr -= offset; if (ptep == NULL) return -EFAULT; pte = *ptep; if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER)) return -EFAULT; pfn = pte_pfn(pte); if (!page_is_ram(pfn)) return -EFAULT; /* no highmem to worry about here */ kaddr = pfn_to_kaddr(pfn); memcpy(ret, kaddr + offset, nb); return 0; } static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) { if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || ((unsigned long)ptr & 7)) return -EFAULT; pagefault_disable(); if (!__get_user_inatomic(*ret, ptr)) { pagefault_enable(); return 0; } pagefault_enable(); return read_user_stack_slow(ptr, ret, 8); } static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) { if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || ((unsigned long)ptr & 3)) return -EFAULT; pagefault_disable(); if (!__get_user_inatomic(*ret, ptr)) { pagefault_enable(); return 0; } pagefault_enable(); return read_user_stack_slow(ptr, ret, 4); } static inline int valid_user_sp(unsigned long sp, int is_64) { if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32) return 0; return 1; } /* * 64-bit user processes use the same stack frame for RT and non-RT signals. */ struct signal_frame_64 { char dummy[__SIGNAL_FRAMESIZE]; struct ucontext uc; unsigned long unused[2]; unsigned int tramp[6]; struct siginfo *pinfo; void *puc; struct siginfo info; char abigap[288]; }; static int is_sigreturn_64_address(unsigned long nip, unsigned long fp) { if (nip == fp + offsetof(struct signal_frame_64, tramp)) return 1; if (vdso64_rt_sigtramp && current->mm->context.vdso_base && nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) return 1; return 0; } /* * Do some sanity checking on the signal frame pointed to by sp. * We check the pinfo and puc pointers in the frame. */ static int sane_signal_64_frame(unsigned long sp) { struct signal_frame_64 __user *sf; unsigned long pinfo, puc; sf = (struct signal_frame_64 __user *) sp; if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) || read_user_stack_64((unsigned long __user *) &sf->puc, &puc)) return 0; return pinfo == (unsigned long) &sf->info && puc == (unsigned long) &sf->uc; } static void perf_callchain_user_64(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long sp, next_sp; unsigned long next_ip; unsigned long lr; long level = 0; struct signal_frame_64 __user *sigframe; unsigned long __user *fp, *uregs; next_ip = perf_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); for (;;) { fp = (unsigned long __user *) sp; if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) return; if (level > 0 && read_user_stack_64(&fp[2], &next_ip)) return; /* * Note: the next_sp - sp >= signal frame size check * is true when next_sp < sp, which can happen when * transitioning from an alternate signal stack to the * normal stack. 
*/ if (next_sp - sp >= sizeof(struct signal_frame_64) && (is_sigreturn_64_address(next_ip, sp) || (level <= 1 && is_sigreturn_64_address(lr, sp))) && sane_signal_64_frame(sp)) { /* * This looks like an signal frame */ sigframe = (struct signal_frame_64 __user *) sp; uregs = sigframe->uc.uc_mcontext.gp_regs; if (read_user_stack_64(&uregs[PT_NIP], &next_ip) || read_user_stack_64(&uregs[PT_LNK], &lr) || read_user_stack_64(&uregs[PT_R1], &sp)) return; level = 0; perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, next_ip); continue; } if (level == 0) next_ip = lr; perf_callchain_store(entry, next_ip); ++level; sp = next_sp; } } static inline int current_is_64bit(void) { /* * We can't use test_thread_flag() here because we may be on an * interrupt stack, and the thread flags don't get copied over * from the thread_info on the main stack to the interrupt stack. */ return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); } #else /* CONFIG_PPC64 */ /* * On 32-bit we just access the address and let hash_page create a * HPTE if necessary, so there is no need to fall back to reading * the page tables. Since this is called at interrupt level, * do_page_fault() won't treat a DSI as a page fault. */ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) { int rc; if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || ((unsigned long)ptr & 3)) return -EFAULT; pagefault_disable(); rc = __get_user_inatomic(*ret, ptr); pagefault_enable(); return rc; } static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, struct pt_regs *regs) { } static inline int current_is_64bit(void) { return 0; } static inline int valid_user_sp(unsigned long sp, int is_64) { if (!sp || (sp & 7) || sp > TASK_SIZE - 32) return 0; return 1; } #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE #define sigcontext32 sigcontext #define mcontext32 mcontext #define ucontext32 ucontext #define compat_siginfo_t struct siginfo #endif /* CONFIG_PPC64 */ /* * Layout for non-RT signal frames */ struct signal_frame_32 { char dummy[__SIGNAL_FRAMESIZE32]; struct sigcontext32 sctx; struct mcontext32 mctx; int abigap[56]; }; /* * Layout for RT signal frames */ struct rt_signal_frame_32 { char dummy[__SIGNAL_FRAMESIZE32 + 16]; compat_siginfo_t info; struct ucontext32 uc; int abigap[56]; }; static int is_sigreturn_32_address(unsigned int nip, unsigned int fp) { if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) return 1; if (vdso32_sigtramp && current->mm->context.vdso_base && nip == current->mm->context.vdso_base + vdso32_sigtramp) return 1; return 0; } static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp) { if (nip == fp + offsetof(struct rt_signal_frame_32, uc.uc_mcontext.mc_pad)) return 1; if (vdso32_rt_sigtramp && current->mm->context.vdso_base && nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) return 1; return 0; } static int sane_signal_32_frame(unsigned int sp) { struct signal_frame_32 __user *sf; unsigned int regs; sf = (struct signal_frame_32 __user *) (unsigned long) sp; if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs)) return 0; return regs == (unsigned long) &sf->mctx; } static int sane_rt_signal_32_frame(unsigned int sp) { struct rt_signal_frame_32 __user *sf; unsigned int regs; sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs)) return 0; return regs == (unsigned long) &sf->uc.uc_mcontext; } static unsigned int __user 
*signal_frame_32_regs(unsigned int sp, unsigned int next_sp, unsigned int next_ip) { struct mcontext32 __user *mctx = NULL; struct signal_frame_32 __user *sf; struct rt_signal_frame_32 __user *rt_sf; /* * Note: the next_sp - sp >= signal frame size check * is true when next_sp < sp, for example, when * transitioning from an alternate signal stack to the * normal stack. */ if (next_sp - sp >= sizeof(struct signal_frame_32) && is_sigreturn_32_address(next_ip, sp) && sane_signal_32_frame(sp)) { sf = (struct signal_frame_32 __user *) (unsigned long) sp; mctx = &sf->mctx; } if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) && is_rt_sigreturn_32_address(next_ip, sp) && sane_rt_signal_32_frame(sp)) { rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp; mctx = &rt_sf->uc.uc_mcontext; } if (!mctx) return NULL; return mctx->mc_gregs; } static void perf_callchain_user_32(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned int sp, next_sp; unsigned int next_ip; unsigned int lr; long level = 0; unsigned int __user *fp, *uregs; next_ip = perf_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); while (entry->nr < PERF_MAX_STACK_DEPTH) { fp = (unsigned int __user *) (unsigned long) sp; if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) return; if (level > 0 && read_user_stack_32(&fp[1], &next_ip)) return; uregs = signal_frame_32_regs(sp, next_sp, next_ip); if (!uregs && level <= 1) uregs = signal_frame_32_regs(sp, next_sp, lr); if (uregs) { /* * This looks like an signal frame, so restart * the stack trace with the values in it. */ if (read_user_stack_32(&uregs[PT_NIP], &next_ip) || read_user_stack_32(&uregs[PT_LNK], &lr) || read_user_stack_32(&uregs[PT_R1], &sp)) return; level = 0; perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, next_ip); continue; } if (level == 0) next_ip = lr; perf_callchain_store(entry, next_ip); ++level; sp = next_sp; } } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { if (current_is_64bit()) perf_callchain_user_64(entry, regs); else perf_callchain_user_32(entry, regs); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_1716_0
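The callchain walkers above never follow a saved back-chain blindly: valid_next_sp() and valid_user_sp() require each candidate stack pointer to be properly aligned and to make forward progress unless it hops to a different stack area, and every user-space load goes through read_user_stack_32()/read_user_stack_64(). The sketch below applies the alignment and forward-progress checks to a synthetic back-chain; the 16 KiB THREAD_SIZE, the 32-byte minimum frame size, and the frame addresses are invented for illustration, and the real kernel additionally validates each pointer against the task's stack with validate_sp().

/*
 * Sketch of the back-chain sanity checks used by valid_next_sp() above,
 * run over a synthetic chain of saved stack pointers.
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE	0x4000UL	/* pretend stacks are 16 KiB */
#define FRAME_MIN_SIZE	32UL

static int valid_next_sp(uintptr_t sp, uintptr_t prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (sp >= prev_sp + FRAME_MIN_SIZE)
		return 1;		/* normal case: moved up the stack */
	/* sp may restart lower when jumping to a different stack area */
	return (sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
	/* each pair is (current sp, back-chain value stored at that sp) */
	uintptr_t chain[][2] = {
		{ 0x7000, 0x7040 }, { 0x7040, 0x70c0 }, { 0x70c0, 0x70c8 },
	};
	uintptr_t sp = chain[0][0];
	size_t i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
		uintptr_t next_sp = chain[i][1];

		if (!valid_next_sp(next_sp, sp)) {
			printf("stop: %#lx is not a sane next frame\n",
			       (unsigned long)next_sp);
			break;
		}
		printf("frame %#lx -> %#lx\n", (unsigned long)sp,
		       (unsigned long)next_sp);
		sp = next_sp;
	}
	return 0;
}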
crossvul-cpp_data_bad_5006_2
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/module.h> #include <linux/atomic.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/timer.h> #include <linux/netfilter.h> #include <net/protocol.h> #include <net/ip.h> #include <net/checksum.h> #include <net/route.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/ipv4/nf_nat_masquerade.h> unsigned int nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, const struct nf_nat_range *range, const struct net_device *out) { struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; struct nf_nat_range newrange; const struct rtable *rt; __be32 newsrc, nh; NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); nat = nfct_nat(ct); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY)); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) return NF_ACCEPT; rt = skb_rtable(skb); nh = rt_nexthop(rt, ip_hdr(skb)->daddr); newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); if (!newsrc) { pr_info("%s ate my IP address\n", out->name); return NF_DROP; } nat->masq_index = out->ifindex; /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newsrc; newrange.max_addr.ip = newsrc; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); static int device_cmp(struct nf_conn *i, void *ifindex) { const struct nf_conn_nat *nat = nfct_nat(i); if (!nat) return 0; if (nf_ct_l3num(i) != NFPROTO_IPV4) return 0; return nat->masq_index == (int)(long)ifindex; } static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { /* Device was downed. Search entire table for * conntracks which were associated with that device, * and forget them. 
*/ NF_CT_ASSERT(dev->ifindex != 0); nf_ct_iterate_cleanup(net, device_cmp, (void *)(long)dev->ifindex, 0, 0); } return NOTIFY_DONE; } static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; struct netdev_notifier_info info; netdev_notifier_info_init(&info, dev); return masq_device_event(this, event, &info); } static struct notifier_block masq_dev_notifier = { .notifier_call = masq_device_event, }; static struct notifier_block masq_inet_notifier = { .notifier_call = masq_inet_event, }; static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); void nf_nat_masquerade_ipv4_register_notifier(void) { /* check if the notifier was already set */ if (atomic_inc_return(&masquerade_notifier_refcount) > 1) return; /* Register for device down reports */ register_netdevice_notifier(&masq_dev_notifier); /* Register IP address change reports */ register_inetaddr_notifier(&masq_inet_notifier); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier); void nf_nat_masquerade_ipv4_unregister_notifier(void) { /* check if the notifier still has clients */ if (atomic_dec_return(&masquerade_notifier_refcount) > 0) return; unregister_netdevice_notifier(&masq_dev_notifier); unregister_inetaddr_notifier(&masq_inet_notifier); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
./CrossVul/dataset_final_sorted/CWE-399/c/bad_5006_2
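Side note on the registration pattern in the record above: nf_nat_masquerade_ipv4_register_notifier() and nf_nat_masquerade_ipv4_unregister_notifier() only perform the real (un)registration on the 0 -> 1 and 1 -> 0 transitions of masquerade_notifier_refcount. Below is a minimal, self-contained user-space sketch of that refcounting idiom; the toy_acquire()/toy_release() names and the printed messages are invented for illustration and are not part of the kernel sources.

/* Illustrative only: "register on first user, unregister on last user". */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int toy_refcount = 0;

static void toy_acquire(void)
{
	/* Only the 0 -> 1 transition performs the real registration. */
	if (atomic_fetch_add(&toy_refcount, 1) > 0)
		return;
	puts("registering notifiers");
}

static void toy_release(void)
{
	/* Only the 1 -> 0 transition tears the registration down. */
	if (atomic_fetch_sub(&toy_refcount, 1) > 1)
		return;
	puts("unregistering notifiers");
}

int main(void)
{
	toy_acquire();	/* registers */
	toy_acquire();	/* no-op, already registered */
	toy_release();	/* no-op, still one user left */
	toy_release();	/* unregisters */
	return 0;
}

The kernel code expresses the same two tests as atomic_inc_return(...) > 1 and atomic_dec_return(...) > 0, so exactly one caller observes each transition and does the actual work.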
crossvul-cpp_data_good_5768_0
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "iodev.h" #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/vmalloc.h> #include <linux/reboot.h> #include <linux/debugfs.h> #include <linux/highmem.h> #include <linux/file.h> #include <linux/syscore_ops.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/anon_inodes.h> #include <linux/profile.h> #include <linux/kvm_para.h> #include <linux/pagemap.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/srcu.h> #include <linux/hugetlb.h> #include <linux/slab.h> #include <linux/sort.h> #include <linux/bsearch.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include "coalesced_mmio.h" #include "async_pf.h" #define CREATE_TRACE_POINTS #include <trace/events/kvm.h> MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); /* * Ordering of locks: * * kvm->lock --> kvm->slots_lock --> kvm->irq_lock */ DEFINE_RAW_SPINLOCK(kvm_lock); LIST_HEAD(vm_list); static cpumask_var_t cpus_hardware_enabled; static int kvm_usage_count = 0; static atomic_t hardware_enable_failed; struct kmem_cache *kvm_vcpu_cache; EXPORT_SYMBOL_GPL(kvm_vcpu_cache); static __read_mostly struct preempt_ops kvm_preempt_ops; struct dentry *kvm_debugfs_dir; static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #ifdef CONFIG_COMPAT static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #endif static int hardware_enable_all(void); static void hardware_disable_all(void); static void kvm_io_bus_destroy(struct kvm_io_bus *bus); bool kvm_rebooting; EXPORT_SYMBOL_GPL(kvm_rebooting); static bool largepages_enabled = true; bool kvm_is_mmio_pfn(pfn_t pfn) { if (pfn_valid(pfn)) { int reserved; struct page *tail = pfn_to_page(pfn); struct page *head = compound_trans_head(tail); reserved = PageReserved(head); if (head != tail) { /* * "head" is not a dangling pointer * (compound_trans_head takes care of that) * but the hugepage may have been splitted * from under us (and we may not hold a * reference count on the head page so it can * be reused before we run PageReferenced), so * we've to check PageTail before returning * what we just read. */ smp_rmb(); if (PageTail(tail)) return reserved; } return PageReserved(tail); } return true; } /* * Switches to specified vcpu, until a matching vcpu_put() */ void vcpu_load(struct kvm_vcpu *vcpu) { int cpu; mutex_lock(&vcpu->mutex); if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { /* The thread running this VCPU changed. 
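* The new pid is published with rcu_assign_pointer() and the old one is only put_pid()'d after synchronize_rcu(), so lockless readers such as kvm_vcpu_yield_to(), which does rcu_dereference(target->pid), never see a freed pid.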
*/ struct pid *oldpid = vcpu->pid; struct pid *newpid = get_task_pid(current, PIDTYPE_PID); rcu_assign_pointer(vcpu->pid, newpid); synchronize_rcu(); put_pid(oldpid); } cpu = get_cpu(); preempt_notifier_register(&vcpu->preempt_notifier); kvm_arch_vcpu_load(vcpu, cpu); put_cpu(); } void vcpu_put(struct kvm_vcpu *vcpu) { preempt_disable(); kvm_arch_vcpu_put(vcpu); preempt_notifier_unregister(&vcpu->preempt_notifier); preempt_enable(); mutex_unlock(&vcpu->mutex); } static void ack_flush(void *_completed) { } static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) { int i, cpu, me; cpumask_var_t cpus; bool called = true; struct kvm_vcpu *vcpu; zalloc_cpumask_var(&cpus, GFP_ATOMIC); me = get_cpu(); kvm_for_each_vcpu(i, vcpu, kvm) { kvm_make_request(req, vcpu); cpu = vcpu->cpu; /* Set ->requests bit before we read ->mode */ smp_mb(); if (cpus != NULL && cpu != -1 && cpu != me && kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) cpumask_set_cpu(cpu, cpus); } if (unlikely(cpus == NULL)) smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); else if (!cpumask_empty(cpus)) smp_call_function_many(cpus, ack_flush, NULL, 1); else called = false; put_cpu(); free_cpumask_var(cpus); return called; } void kvm_flush_remote_tlbs(struct kvm *kvm) { long dirty_count = kvm->tlbs_dirty; smp_mb(); if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) ++kvm->stat.remote_tlb_flush; cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); } void kvm_reload_remote_mmus(struct kvm *kvm) { make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); } int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) { struct page *page; int r; mutex_init(&vcpu->mutex); vcpu->cpu = -1; vcpu->kvm = kvm; vcpu->vcpu_id = id; vcpu->pid = NULL; init_waitqueue_head(&vcpu->wq); kvm_async_pf_vcpu_init(vcpu); page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->run = page_address(page); kvm_vcpu_set_in_spin_loop(vcpu, false); kvm_vcpu_set_dy_eligible(vcpu, false); r = kvm_arch_vcpu_init(vcpu); if (r < 0) goto fail_free_run; return 0; fail_free_run: free_page((unsigned long)vcpu->run); fail: return r; } EXPORT_SYMBOL_GPL(kvm_vcpu_init); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) { put_pid(vcpu->pid); kvm_arch_vcpu_uninit(vcpu); free_page((unsigned long)vcpu->run); } EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) { return container_of(mn, struct kvm, mmu_notifier); } static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush, idx; /* * When ->invalidate_page runs, the linux pte has been zapped * already but the page is still allocated until * ->invalidate_page returns. So if we increase the sequence * here the kvm page fault will notice if the spte can't be * established because the page is going to be freed. If * instead the kvm page fault establishes the spte before * ->invalidate_page runs, kvm_unmap_hva will release it * before returning. * * The sequence increase only need to be seen at spin_unlock * time, and not at spin_lock time. * * Increasing the sequence after the spin_unlock would be * unsafe because the kvm page fault could then establish the * pte after kvm_unmap_hva returned, without noticing the page * is going to be freed. 
*/ idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address, pte_t pte) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); kvm->mmu_notifier_seq++; kvm_set_spte_hva(kvm, address, pte); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush = 0, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and * count is also read inside the mmu_lock critical section. */ kvm->mmu_notifier_count++; need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); spin_lock(&kvm->mmu_lock); /* * This sequence increase will notify the kvm page fault that * the page that is going to be mapped in the spte could have * been freed. */ kvm->mmu_notifier_seq++; smp_wmb(); /* * The above sequence increase must be visible before the * below count decrease, which is ensured by the smp_wmb above * in conjunction with the smp_rmb in mmu_notifier_retry(). 
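* Page fault paths re-check both mmu_notifier_count and mmu_notifier_seq under mmu_lock (mmu_notifier_retry()) before installing an spte, which is what makes this count/seq dance safe.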
*/ kvm->mmu_notifier_count--; spin_unlock(&kvm->mmu_lock); BUG_ON(kvm->mmu_notifier_count < 0); } static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); young = kvm_age_hva(kvm, address); if (young) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return young; } static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long address) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int young, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); young = kvm_test_age_hva(kvm, address); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return young; } static void kvm_mmu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int idx; idx = srcu_read_lock(&kvm->srcu); kvm_arch_flush_shadow_all(kvm); srcu_read_unlock(&kvm->srcu, idx); } static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .invalidate_page = kvm_mmu_notifier_invalidate_page, .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, .clear_flush_young = kvm_mmu_notifier_clear_flush_young, .test_young = kvm_mmu_notifier_test_young, .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; static int kvm_init_mmu_notifier(struct kvm *kvm) { kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; return mmu_notifier_register(&kvm->mmu_notifier, current->mm); } #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ static int kvm_init_mmu_notifier(struct kvm *kvm) { return 0; } #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ static void kvm_init_memslots_id(struct kvm *kvm) { int i; struct kvm_memslots *slots = kvm->memslots; for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) slots->id_to_index[i] = slots->memslots[i].id = i; } static struct kvm *kvm_create_vm(unsigned long type) { int r, i; struct kvm *kvm = kvm_arch_alloc_vm(); if (!kvm) return ERR_PTR(-ENOMEM); r = kvm_arch_init_vm(kvm, type); if (r) goto out_err_nodisable; r = hardware_enable_all(); if (r) goto out_err_nodisable; #ifdef CONFIG_HAVE_KVM_IRQCHIP INIT_HLIST_HEAD(&kvm->mask_notifier_list); INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif r = -ENOMEM; kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!kvm->memslots) goto out_err_nosrcu; kvm_init_memslots_id(kvm); if (init_srcu_struct(&kvm->srcu)) goto out_err_nosrcu; for (i = 0; i < KVM_NR_BUSES; i++) { kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); if (!kvm->buses[i]) goto out_err; } spin_lock_init(&kvm->mmu_lock); kvm->mm = current->mm; atomic_inc(&kvm->mm->mm_count); kvm_eventfd_init(kvm); mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); mutex_init(&kvm->slots_lock); atomic_set(&kvm->users_count, 1); r = kvm_init_mmu_notifier(kvm); if (r) goto out_err; raw_spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); raw_spin_unlock(&kvm_lock); return kvm; out_err: cleanup_srcu_struct(&kvm->srcu); out_err_nosrcu: hardware_disable_all(); out_err_nodisable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); kfree(kvm->memslots); kvm_arch_free_vm(kvm); return ERR_PTR(r); } /* * Avoid using vmalloc for a small buffer. * Should not be used when the size is statically known. 
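* Memory obtained from kvm_kvzalloc() must be released with kvm_kvfree(), which uses is_vmalloc_addr() to pick vfree() or kfree().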
*/ void *kvm_kvzalloc(unsigned long size) { if (size > PAGE_SIZE) return vzalloc(size); else return kzalloc(size, GFP_KERNEL); } void kvm_kvfree(const void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); else kfree(addr); } static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) { if (!memslot->dirty_bitmap) return; kvm_kvfree(memslot->dirty_bitmap); memslot->dirty_bitmap = NULL; } /* * Free any memory in @free but not in @dont. */ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { if (!dont || free->dirty_bitmap != dont->dirty_bitmap) kvm_destroy_dirty_bitmap(free); kvm_arch_free_memslot(free, dont); free->npages = 0; } void kvm_free_physmem(struct kvm *kvm) { struct kvm_memslots *slots = kvm->memslots; struct kvm_memory_slot *memslot; kvm_for_each_memslot(memslot, slots) kvm_free_physmem_slot(memslot, NULL); kfree(kvm->memslots); } static void kvm_destroy_vm(struct kvm *kvm) { int i; struct mm_struct *mm = kvm->mm; kvm_arch_sync_events(kvm); raw_spin_lock(&kvm_lock); list_del(&kvm->vm_list); raw_spin_unlock(&kvm_lock); kvm_free_irq_routing(kvm); for (i = 0; i < KVM_NR_BUSES; i++) kvm_io_bus_destroy(kvm->buses[i]); kvm_coalesced_mmio_free(kvm); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); #else kvm_arch_flush_shadow_all(kvm); #endif kvm_arch_destroy_vm(kvm); kvm_free_physmem(kvm); cleanup_srcu_struct(&kvm->srcu); kvm_arch_free_vm(kvm); hardware_disable_all(); mmdrop(mm); } void kvm_get_kvm(struct kvm *kvm) { atomic_inc(&kvm->users_count); } EXPORT_SYMBOL_GPL(kvm_get_kvm); void kvm_put_kvm(struct kvm *kvm) { if (atomic_dec_and_test(&kvm->users_count)) kvm_destroy_vm(kvm); } EXPORT_SYMBOL_GPL(kvm_put_kvm); static int kvm_vm_release(struct inode *inode, struct file *filp) { struct kvm *kvm = filp->private_data; kvm_irqfd_release(kvm); kvm_put_kvm(kvm); return 0; } /* * Allocation size is twice as large as the actual dirty bitmap size. * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. */ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) { #ifndef CONFIG_S390 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); if (!memslot->dirty_bitmap) return -ENOMEM; #endif /* !CONFIG_S390 */ return 0; } static int cmp_memslot(const void *slot1, const void *slot2) { struct kvm_memory_slot *s1, *s2; s1 = (struct kvm_memory_slot *)slot1; s2 = (struct kvm_memory_slot *)slot2; if (s1->npages < s2->npages) return 1; if (s1->npages > s2->npages) return -1; return 0; } /* * Sort the memslots base on its size, so the larger slots * will get better fit. 
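* cmp_memslot() orders the array by descending npages, and id_to_index is rebuilt so slot ids still map to their new array positions.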
*/ static void sort_memslots(struct kvm_memslots *slots) { int i; sort(slots->memslots, KVM_MEM_SLOTS_NUM, sizeof(struct kvm_memory_slot), cmp_memslot, NULL); for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) slots->id_to_index[slots->memslots[i].id] = i; } void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new) { if (new) { int id = new->id; struct kvm_memory_slot *old = id_to_memslot(slots, id); unsigned long npages = old->npages; *old = *new; if (new->npages != npages) sort_memslots(slots); } slots->generation++; } static int check_memory_region_flags(struct kvm_userspace_memory_region *mem) { u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; #ifdef KVM_CAP_READONLY_MEM valid_flags |= KVM_MEM_READONLY; #endif if (mem->flags & ~valid_flags) return -EINVAL; return 0; } /* * Allocate some memory and give it an address in the guest physical address * space. * * Discontiguous memory is allowed, mostly for framebuffers. * * Must be called holding mmap_sem for write. */ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc) { int r; gfn_t base_gfn; unsigned long npages; unsigned long i; struct kvm_memory_slot *memslot; struct kvm_memory_slot old, new; struct kvm_memslots *slots, *old_memslots; r = check_memory_region_flags(mem); if (r) goto out; r = -EINVAL; /* General sanity checks */ if (mem->memory_size & (PAGE_SIZE - 1)) goto out; if (mem->guest_phys_addr & (PAGE_SIZE - 1)) goto out; /* We can read the guest memory with __xxx_user() later on. */ if (user_alloc && ((mem->userspace_addr & (PAGE_SIZE - 1)) || !access_ok(VERIFY_WRITE, (void __user *)(unsigned long)mem->userspace_addr, mem->memory_size))) goto out; if (mem->slot >= KVM_MEM_SLOTS_NUM) goto out; if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) goto out; memslot = id_to_memslot(kvm->memslots, mem->slot); base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT; r = -EINVAL; if (npages > KVM_MEM_MAX_NR_PAGES) goto out; if (!npages) mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; new = old = *memslot; new.id = mem->slot; new.base_gfn = base_gfn; new.npages = npages; new.flags = mem->flags; /* Disallow changing a memory slot's size. 
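* (A slot can still be deleted by passing a zero size; only a nonzero size that differs from the existing one is rejected.)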
*/ r = -EINVAL; if (npages && old.npages && npages != old.npages) goto out_free; /* Check for overlaps */ r = -EEXIST; for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; if (s == memslot || !s->npages) continue; if (!((base_gfn + npages <= s->base_gfn) || (base_gfn >= s->base_gfn + s->npages))) goto out_free; } /* Free page dirty bitmap if unneeded */ if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) new.dirty_bitmap = NULL; r = -ENOMEM; /* Allocate if a slot is being created */ if (npages && !old.npages) { new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; if (kvm_arch_create_memslot(&new, npages)) goto out_free; } /* Allocate page dirty bitmap if needed */ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { if (kvm_create_dirty_bitmap(&new) < 0) goto out_free; /* destroy any largepage mappings for dirty tracking */ } if (!npages || base_gfn != old.base_gfn) { struct kvm_memory_slot *slot; r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; slot = id_to_memslot(slots, mem->slot); slot->flags |= KVM_MEMSLOT_INVALID; update_memslots(slots, NULL); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. * * validation of sp->gfn happens in: * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_roots) */ kvm_arch_flush_shadow_memslot(kvm, slot); kfree(old_memslots); } r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); if (r) goto out_free; /* map/unmap the pages in iommu page table */ if (npages) { r = kvm_iommu_map_pages(kvm, &new); if (r) goto out_free; } else kvm_iommu_unmap_pages(kvm, &old); r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; /* actual memory is freed via old in kvm_free_physmem_slot below */ if (!npages) { new.dirty_bitmap = NULL; memset(&new.arch, 0, sizeof(new.arch)); } update_memslots(slots, &new); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); /* * If the new memory slot is created, we need to clear all * mmio sptes. 
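* (The base_gfn comparison below also covers a slot that was moved, which equally invalidates any cached mmio sptes.)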
*/ if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) kvm_arch_flush_shadow_all(kvm); kvm_free_physmem_slot(&old, &new); kfree(old_memslots); return 0; out_free: kvm_free_physmem_slot(&new, &old); out: return r; } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc) { int r; mutex_lock(&kvm->slots_lock); r = __kvm_set_memory_region(kvm, mem, user_alloc); mutex_unlock(&kvm->slots_lock); return r; } EXPORT_SYMBOL_GPL(kvm_set_memory_region); int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc) { if (mem->slot >= KVM_MEMORY_SLOTS) return -EINVAL; return kvm_set_memory_region(kvm, mem, user_alloc); } int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty) { struct kvm_memory_slot *memslot; int r, i; unsigned long n; unsigned long any = 0; r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !any && i < n/sizeof(long); ++i) any = memslot->dirty_bitmap[i]; r = -EFAULT; if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) goto out; if (any) *is_dirty = 1; r = 0; out: return r; } bool kvm_largepages_enabled(void) { return largepages_enabled; } void kvm_disable_largepages(void) { largepages_enabled = false; } EXPORT_SYMBOL_GPL(kvm_disable_largepages); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { return __gfn_to_memslot(kvm_memslots(kvm), gfn); } EXPORT_SYMBOL_GPL(gfn_to_memslot); int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); if (!memslot || memslot->id >= KVM_MEMORY_SLOTS || memslot->flags & KVM_MEMSLOT_INVALID) return 0; return 1; } EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) { struct vm_area_struct *vma; unsigned long addr, size; size = PAGE_SIZE; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return PAGE_SIZE; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, addr); if (!vma) goto out; size = vma_kernel_pagesize(vma); out: up_read(&current->mm->mmap_sem); return size; } static bool memslot_is_readonly(struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_READONLY; } static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write) { if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return KVM_HVA_ERR_BAD; if (memslot_is_readonly(slot) && write) return KVM_HVA_ERR_RO_BAD; if (nr_pages) *nr_pages = slot->npages - (gfn - slot->base_gfn); return __gfn_to_hva_memslot(slot, gfn); } static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages) { return __gfn_to_hva_many(slot, gfn, nr_pages, true); } unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { return gfn_to_hva_many(slot, gfn, NULL); } EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); } EXPORT_SYMBOL_GPL(gfn_to_hva); /* * The hva returned by this function is only allowed to be read. * It should pair with kvm_read_hva() or kvm_read_hva_atomic(). 
*/ static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn) { return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false); } static int kvm_read_hva(void *data, void __user *hva, int len) { return __copy_from_user(data, hva, len); } static int kvm_read_hva_atomic(void *data, void __user *hva, int len) { return __copy_from_user_inatomic(data, hva, len); } int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int write, struct page **page) { int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; if (write) flags |= FOLL_WRITE; return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); } static inline int check_user_page_hwpoison(unsigned long addr) { int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; rc = __get_user_pages(current, current->mm, addr, 1, flags, NULL, NULL, NULL); return rc == -EHWPOISON; } /* * The atomic path to get the writable pfn which will be stored in @pfn, * true indicates success, otherwise false is returned. */ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, bool write_fault, bool *writable, pfn_t *pfn) { struct page *page[1]; int npages; if (!(async || atomic)) return false; /* * Fast pin a writable pfn only if it is a write fault request * or the caller allows to map a writable pfn for a read fault * request. */ if (!(write_fault || writable)) return false; npages = __get_user_pages_fast(addr, 1, 1, page); if (npages == 1) { *pfn = page_to_pfn(page[0]); if (writable) *writable = true; return true; } return false; } /* * The slow path to get the pfn of the specified host virtual address, * 1 indicates success, -errno is returned if error is detected. */ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, bool *writable, pfn_t *pfn) { struct page *page[1]; int npages = 0; might_sleep(); if (writable) *writable = write_fault; if (async) { down_read(&current->mm->mmap_sem); npages = get_user_page_nowait(current, current->mm, addr, write_fault, page); up_read(&current->mm->mmap_sem); } else npages = get_user_pages_fast(addr, 1, write_fault, page); if (npages != 1) return npages; /* map read fault as writable if possible */ if (unlikely(!write_fault) && writable) { struct page *wpage[1]; npages = __get_user_pages_fast(addr, 1, 1, wpage); if (npages == 1) { *writable = true; put_page(page[0]); page[0] = wpage[0]; } npages = 1; } *pfn = page_to_pfn(page[0]); return npages; } static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) { if (unlikely(!(vma->vm_flags & VM_READ))) return false; if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) return false; return true; } /* * Pin guest page in memory and return its pfn. * @addr: host virtual address which maps memory to the guest * @atomic: whether this function can sleep * @async: whether this function need to wait IO complete if the * host page is not in the memory * @write_fault: whether we should get a writable host page * @writable: whether it allows to map a writable host page for !@write_fault * * The function will map a writable host page for these two cases: * 1): @write_fault = true * 2): @write_fault = false && @writable, @writable will tell the caller * whether the mapping is writable. 
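* Note: when @atomic is true the function must not sleep, so only the fast gup path (hva_to_pfn_fast) is tried and KVM_PFN_ERR_FAULT is returned if it fails.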
*/ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, bool write_fault, bool *writable) { struct vm_area_struct *vma; pfn_t pfn = 0; int npages; /* we can do it either atomically or asynchronously, not both */ BUG_ON(atomic && async); if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn)) return pfn; if (atomic) return KVM_PFN_ERR_FAULT; npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); if (npages == 1) return pfn; down_read(&current->mm->mmap_sem); if (npages == -EHWPOISON || (!async && check_user_page_hwpoison(addr))) { pfn = KVM_PFN_ERR_HWPOISON; goto exit; } vma = find_vma_intersection(current->mm, addr, addr + 1); if (vma == NULL) pfn = KVM_PFN_ERR_FAULT; else if ((vma->vm_flags & VM_PFNMAP)) { pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; BUG_ON(!kvm_is_mmio_pfn(pfn)); } else { if (async && vma_is_valid(vma, write_fault)) *async = true; pfn = KVM_PFN_ERR_FAULT; } exit: up_read(&current->mm->mmap_sem); return pfn; } static pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) { unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); if (addr == KVM_HVA_ERR_RO_BAD) return KVM_PFN_ERR_RO_FAULT; if (kvm_is_error_hva(addr)) return KVM_PFN_ERR_BAD; /* Do not map writable pfn in the readonly memslot. */ if (writable && memslot_is_readonly(slot)) { *writable = false; writable = NULL; } return hva_to_pfn(addr, atomic, async, write_fault, writable); } static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable) { struct kvm_memory_slot *slot; if (async) *async = false; slot = gfn_to_memslot(kvm, gfn); return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, writable); } pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) { return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic); pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, bool write_fault, bool *writable) { return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); } EXPORT_SYMBOL_GPL(gfn_to_pfn_async); pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) { return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn); pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable) { return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); } EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); } pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, int nr_pages) { unsigned long addr; gfn_t entry; addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); if (kvm_is_error_hva(addr)) return -1; if (entry < nr_pages) return 0; return __get_user_pages_fast(addr, nr_pages, 1, pages); } EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); static struct page *kvm_pfn_to_page(pfn_t pfn) { if (is_error_pfn(pfn)) return KVM_ERR_PTR_BAD_PAGE; if (kvm_is_mmio_pfn(pfn)) { WARN_ON(1); return KVM_ERR_PTR_BAD_PAGE; } return pfn_to_page(pfn); } struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) { pfn_t pfn; pfn = gfn_to_pfn(kvm, gfn); return kvm_pfn_to_page(pfn); } EXPORT_SYMBOL_GPL(gfn_to_page); void 
kvm_release_page_clean(struct page *page) { WARN_ON(is_error_page(page)); kvm_release_pfn_clean(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_release_page_clean); void kvm_release_pfn_clean(pfn_t pfn) { WARN_ON(is_error_pfn(pfn)); if (!kvm_is_mmio_pfn(pfn)) put_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); void kvm_release_page_dirty(struct page *page) { WARN_ON(is_error_page(page)); kvm_release_pfn_dirty(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_release_page_dirty); void kvm_release_pfn_dirty(pfn_t pfn) { kvm_set_pfn_dirty(pfn); kvm_release_pfn_clean(pfn); } EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); void kvm_set_page_dirty(struct page *page) { kvm_set_pfn_dirty(page_to_pfn(page)); } EXPORT_SYMBOL_GPL(kvm_set_page_dirty); void kvm_set_pfn_dirty(pfn_t pfn) { if (!kvm_is_mmio_pfn(pfn)) { struct page *page = pfn_to_page(pfn); if (!PageReserved(page)) SetPageDirty(page); } } EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); void kvm_set_pfn_accessed(pfn_t pfn) { if (!kvm_is_mmio_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); void kvm_get_pfn(pfn_t pfn) { if (!kvm_is_mmio_pfn(pfn)) get_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_get_pfn); static int next_segment(unsigned long len, int offset) { if (len > PAGE_SIZE - offset) return PAGE_SIZE - offset; else return len; } int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len) { int r; unsigned long addr; addr = gfn_to_hva_read(kvm, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; r = kvm_read_hva(data, (void __user *)addr + offset, len); if (r) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(kvm_read_guest_page); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_read_guest); int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) { int r; unsigned long addr; gfn_t gfn = gpa >> PAGE_SHIFT; int offset = offset_in_page(gpa); addr = gfn_to_hva_read(kvm, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; pagefault_disable(); r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len); pagefault_enable(); if (r) return -EFAULT; return 0; } EXPORT_SYMBOL(kvm_read_guest_atomic); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len) { int r; unsigned long addr; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return -EFAULT; r = __copy_to_user((void __user *)addr + offset, data, len); if (r) return -EFAULT; mark_page_dirty(kvm, gfn); return 0; } EXPORT_SYMBOL_GPL(kvm_write_guest_page); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; data += seg; ++gfn; } return 0; } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); gfn_t gfn = gpa >> PAGE_SHIFT; ghc->gpa = gpa; ghc->generation = slots->generation; ghc->memslot = gfn_to_memslot(kvm, gfn); ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, 
NULL); if (!kvm_is_error_hva(ghc->hva)) ghc->hva += offset; else return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int r; if (slots->generation != ghc->generation) kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; r = __copy_to_user((void __user *)ghc->hva, data, len); if (r) return -EFAULT; mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); return 0; } EXPORT_SYMBOL_GPL(kvm_write_guest_cached); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int r; if (slots->generation != ghc->generation) kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; r = __copy_from_user(data, (void __user *)ghc->hva, len); if (r) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(kvm_read_guest_cached); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) { return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page, offset, len); } EXPORT_SYMBOL_GPL(kvm_clear_guest_page); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) { gfn_t gfn = gpa >> PAGE_SHIFT; int seg; int offset = offset_in_page(gpa); int ret; while ((seg = next_segment(len, offset)) != 0) { ret = kvm_clear_guest_page(kvm, gfn, offset, seg); if (ret < 0) return ret; offset = 0; len -= seg; ++gfn; } return 0; } EXPORT_SYMBOL_GPL(kvm_clear_guest); void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn) { if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; /* TODO: introduce set_bit_le() and use it */ test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap); } } void mark_page_dirty(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot; memslot = gfn_to_memslot(kvm, gfn); mark_page_dirty_in_slot(kvm, memslot, gfn); } /* * The vCPU has executed a HLT instruction with in-kernel mode enabled. */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); if (kvm_arch_vcpu_runnable(vcpu)) { kvm_make_request(KVM_REQ_UNHALT, vcpu); break; } if (kvm_cpu_has_pending_timer(vcpu)) break; if (signal_pending(current)) break; schedule(); } finish_wait(&vcpu->wq, &wait); } #ifndef CONFIG_S390 /* * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 
*/ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; wait_queue_head_t *wqp; wqp = kvm_arch_vcpu_wq(vcpu); if (waitqueue_active(wqp)) { wake_up_interruptible(wqp); ++vcpu->stat.halt_wakeup; } me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (kvm_arch_vcpu_should_kick(vcpu)) smp_send_reschedule(cpu); put_cpu(); } #endif /* !CONFIG_S390 */ void kvm_resched(struct kvm_vcpu *vcpu) { if (!need_resched()) return; cond_resched(); } EXPORT_SYMBOL_GPL(kvm_resched); bool kvm_vcpu_yield_to(struct kvm_vcpu *target) { struct pid *pid; struct task_struct *task = NULL; rcu_read_lock(); pid = rcu_dereference(target->pid); if (pid) task = get_pid_task(target->pid, PIDTYPE_PID); rcu_read_unlock(); if (!task) return false; if (task->flags & PF_VCPU) { put_task_struct(task); return false; } if (yield_to(task, 1)) { put_task_struct(task); return true; } put_task_struct(task); return false; } EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT /* * Helper that checks whether a VCPU is eligible for directed yield. * Most eligible candidate to yield is decided by following heuristics: * * (a) VCPU which has not done pl-exit or cpu relax intercepted recently * (preempted lock holder), indicated by @in_spin_loop. * Set at the beiginning and cleared at the end of interception/PLE handler. * * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get * chance last time (mostly it has become eligible now since we have probably * yielded to lockholder in last iteration. This is done by toggling * @dy_eligible each time a VCPU checked for eligibility.) * * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding * to preempted lock-holder could result in wrong VCPU selection and CPU * burning. Giving priority for a potential lock-holder increases lock * progress. * * Since algorithm is based on heuristics, accessing another VCPU data without * locking does not harm. It may result in trying to yield to same VCPU, fail * and continue with next VCPU and so on. */ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) { bool eligible; eligible = !vcpu->spin_loop.in_spin_loop || (vcpu->spin_loop.in_spin_loop && vcpu->spin_loop.dy_eligible); if (vcpu->spin_loop.in_spin_loop) kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); return eligible; } #endif void kvm_vcpu_on_spin(struct kvm_vcpu *me) { struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; int yielded = 0; int pass; int i; kvm_vcpu_set_in_spin_loop(me, true); /* * We boost the priority of a VCPU that is runnable but not * currently running, because it got preempted by something * else and called schedule in __vcpu_run. Hopefully that * VCPU is holding the lock that we need and will release it. * We approximate round-robin by starting at the last boosted VCPU. 
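* Two passes over the vcpu array: first the vcpus after last_boosted_vcpu, then the ones up to and including it, stopping as soon as a yield succeeds.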
*/ for (pass = 0; pass < 2 && !yielded; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; continue; } else if (pass && i > last_boosted_vcpu) break; if (vcpu == me) continue; if (waitqueue_active(&vcpu->wq)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; if (kvm_vcpu_yield_to(vcpu)) { kvm->last_boosted_vcpu = i; yielded = 1; break; } } } kvm_vcpu_set_in_spin_loop(me, false); /* Ensure vcpu is not eligible during next spinloop */ kvm_vcpu_set_dy_eligible(me, false); } EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct kvm_vcpu *vcpu = vma->vm_file->private_data; struct page *page; if (vmf->pgoff == 0) page = virt_to_page(vcpu->run); #ifdef CONFIG_X86 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) page = virt_to_page(vcpu->arch.pio_data); #endif #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); #endif else return kvm_arch_vcpu_fault(vcpu, vmf); get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct kvm_vcpu_vm_ops = { .fault = kvm_vcpu_fault, }; static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &kvm_vcpu_vm_ops; return 0; } static int kvm_vcpu_release(struct inode *inode, struct file *filp) { struct kvm_vcpu *vcpu = filp->private_data; kvm_put_kvm(vcpu->kvm); return 0; } static struct file_operations kvm_vcpu_fops = { .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = kvm_vcpu_compat_ioctl, #endif .mmap = kvm_vcpu_mmap, .llseek = noop_llseek, }; /* * Allocates an inode for the vcpu. */ static int create_vcpu_fd(struct kvm_vcpu *vcpu) { return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR); } /* * Creates some virtual cpus. Good luck creating more than one. 
*/ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu, *v; vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu); preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (!kvm_vcpu_compatible(vcpu)) { r = -EINVAL; goto unlock_vcpu_destroy; } if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { r = -EINVAL; goto unlock_vcpu_destroy; } kvm_for_each_vcpu(r, v, kvm) if (v->vcpu_id == id) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); return r; } static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) { if (sigset) { sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); vcpu->sigset_active = 1; vcpu->sigset = *sigset; } else vcpu->sigset_active = 0; return 0; } static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; struct kvm_fpu *fpu = NULL; struct kvm_sregs *kvm_sregs = NULL; if (vcpu->kvm->mm != current->mm) return -EIO; #if defined(CONFIG_S390) || defined(CONFIG_PPC) /* * Special cases: vcpu ioctls that are asynchronous to vcpu execution, * so vcpu_load() would break it. */ if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT) return kvm_arch_vcpu_ioctl(filp, ioctl, arg); #endif vcpu_load(vcpu); switch (ioctl) { case KVM_RUN: r = -EINVAL; if (arg) goto out; r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); trace_kvm_userspace_exit(vcpu->run->exit_reason, r); break; case KVM_GET_REGS: { struct kvm_regs *kvm_regs; r = -ENOMEM; kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); if (!kvm_regs) goto out; r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); if (r) goto out_free1; r = -EFAULT; if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) goto out_free1; r = 0; out_free1: kfree(kvm_regs); break; } case KVM_SET_REGS: { struct kvm_regs *kvm_regs; r = -ENOMEM; kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); if (IS_ERR(kvm_regs)) { r = PTR_ERR(kvm_regs); goto out; } r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); if (r) goto out_free2; r = 0; out_free2: kfree(kvm_regs); break; } case KVM_GET_SREGS: { kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); r = -ENOMEM; if (!kvm_sregs) goto out; r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) goto out; r = 0; break; } case KVM_SET_SREGS: { kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); if (IS_ERR(kvm_sregs)) { r = PTR_ERR(kvm_sregs); goto out; } r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); if (r) goto out; r = 0; break; } case KVM_GET_MP_STATE: { struct kvm_mp_state mp_state; r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &mp_state, sizeof mp_state)) goto out; r = 0; break; } case KVM_SET_MP_STATE: { struct kvm_mp_state mp_state; r = -EFAULT; if (copy_from_user(&mp_state, argp, sizeof mp_state)) goto out; r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, 
&mp_state); if (r) goto out; r = 0; break; } case KVM_TRANSLATE: { struct kvm_translation tr; r = -EFAULT; if (copy_from_user(&tr, argp, sizeof tr)) goto out; r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tr, sizeof tr)) goto out; r = 0; break; } case KVM_SET_GUEST_DEBUG: { struct kvm_guest_debug dbg; r = -EFAULT; if (copy_from_user(&dbg, argp, sizeof dbg)) goto out; r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); if (r) goto out; r = 0; break; } case KVM_SET_SIGNAL_MASK: { struct kvm_signal_mask __user *sigmask_arg = argp; struct kvm_signal_mask kvm_sigmask; sigset_t sigset, *p; p = NULL; if (argp) { r = -EFAULT; if (copy_from_user(&kvm_sigmask, argp, sizeof kvm_sigmask)) goto out; r = -EINVAL; if (kvm_sigmask.len != sizeof sigset) goto out; r = -EFAULT; if (copy_from_user(&sigset, sigmask_arg->sigset, sizeof sigset)) goto out; p = &sigset; } r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); break; } case KVM_GET_FPU: { fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); r = -ENOMEM; if (!fpu) goto out; r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) goto out; r = 0; break; } case KVM_SET_FPU: { fpu = memdup_user(argp, sizeof(*fpu)); if (IS_ERR(fpu)) { r = PTR_ERR(fpu); goto out; } r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); if (r) goto out; r = 0; break; } default: r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); } out: vcpu_put(vcpu); kfree(fpu); kfree(kvm_sregs); return r; } #ifdef CONFIG_COMPAT static long kvm_vcpu_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = compat_ptr(arg); int r; if (vcpu->kvm->mm != current->mm) return -EIO; switch (ioctl) { case KVM_SET_SIGNAL_MASK: { struct kvm_signal_mask __user *sigmask_arg = argp; struct kvm_signal_mask kvm_sigmask; compat_sigset_t csigset; sigset_t sigset; if (argp) { r = -EFAULT; if (copy_from_user(&kvm_sigmask, argp, sizeof kvm_sigmask)) goto out; r = -EINVAL; if (kvm_sigmask.len != sizeof csigset) goto out; r = -EFAULT; if (copy_from_user(&csigset, sigmask_arg->sigset, sizeof csigset)) goto out; } sigset_from_compat(&sigset, &csigset); r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); break; } default: r = kvm_vcpu_ioctl(filp, ioctl, arg); } out: return r; } #endif static long kvm_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r; if (kvm->mm != current->mm) return -EIO; switch (ioctl) { case KVM_CREATE_VCPU: r = kvm_vm_ioctl_create_vcpu(kvm, arg); if (r < 0) goto out; break; case KVM_SET_USER_MEMORY_REGION: { struct kvm_userspace_memory_region kvm_userspace_mem; r = -EFAULT; if (copy_from_user(&kvm_userspace_mem, argp, sizeof kvm_userspace_mem)) goto out; r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); if (r) goto out; break; } case KVM_GET_DIRTY_LOG: { struct kvm_dirty_log log; r = -EFAULT; if (copy_from_user(&log, argp, sizeof log)) goto out; r = kvm_vm_ioctl_get_dirty_log(kvm, &log); if (r) goto out; break; } #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET case KVM_REGISTER_COALESCED_MMIO: { struct kvm_coalesced_mmio_zone zone; r = -EFAULT; if (copy_from_user(&zone, argp, sizeof zone)) goto out; r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); if (r) goto out; r = 0; break; } case KVM_UNREGISTER_COALESCED_MMIO: { struct kvm_coalesced_mmio_zone zone; r = -EFAULT; if (copy_from_user(&zone, argp, sizeof zone)) goto 
out; r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); if (r) goto out; r = 0; break; } #endif case KVM_IRQFD: { struct kvm_irqfd data; r = -EFAULT; if (copy_from_user(&data, argp, sizeof data)) goto out; r = kvm_irqfd(kvm, &data); break; } case KVM_IOEVENTFD: { struct kvm_ioeventfd data; r = -EFAULT; if (copy_from_user(&data, argp, sizeof data)) goto out; r = kvm_ioeventfd(kvm, &data); break; } #ifdef CONFIG_KVM_APIC_ARCHITECTURE case KVM_SET_BOOT_CPU_ID: r = 0; mutex_lock(&kvm->lock); if (atomic_read(&kvm->online_vcpus) != 0) r = -EBUSY; else kvm->bsp_vcpu_id = arg; mutex_unlock(&kvm->lock); break; #endif #ifdef CONFIG_HAVE_KVM_MSI case KVM_SIGNAL_MSI: { struct kvm_msi msi; r = -EFAULT; if (copy_from_user(&msi, argp, sizeof msi)) goto out; r = kvm_send_userspace_msi(kvm, &msi); break; } #endif #ifdef __KVM_HAVE_IRQ_LINE case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = kvm_vm_ioctl_irq_line(kvm, &irq_event); if (r) goto out; r = -EFAULT; if (ioctl == KVM_IRQ_LINE_STATUS) { if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; break; } #endif default: r = kvm_arch_vm_ioctl(filp, ioctl, arg); if (r == -ENOTTY) r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); } out: return r; } #ifdef CONFIG_COMPAT struct compat_kvm_dirty_log { __u32 slot; __u32 padding1; union { compat_uptr_t dirty_bitmap; /* one bit per page */ __u64 padding2; }; }; static long kvm_vm_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; int r; if (kvm->mm != current->mm) return -EIO; switch (ioctl) { case KVM_GET_DIRTY_LOG: { struct compat_kvm_dirty_log compat_log; struct kvm_dirty_log log; r = -EFAULT; if (copy_from_user(&compat_log, (void __user *)arg, sizeof(compat_log))) goto out; log.slot = compat_log.slot; log.padding1 = compat_log.padding1; log.padding2 = compat_log.padding2; log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); r = kvm_vm_ioctl_get_dirty_log(kvm, &log); if (r) goto out; break; } default: r = kvm_vm_ioctl(filp, ioctl, arg); } out: return r; } #endif static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page[1]; unsigned long addr; int npages; gfn_t gfn = vmf->pgoff; struct kvm *kvm = vma->vm_file->private_data; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return VM_FAULT_SIGBUS; npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, NULL); if (unlikely(npages != 1)) return VM_FAULT_SIGBUS; vmf->page = page[0]; return 0; } static const struct vm_operations_struct kvm_vm_vm_ops = { .fault = kvm_vm_fault, }; static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &kvm_vm_vm_ops; return 0; } static struct file_operations kvm_vm_fops = { .release = kvm_vm_release, .unlocked_ioctl = kvm_vm_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = kvm_vm_compat_ioctl, #endif .mmap = kvm_vm_mmap, .llseek = noop_llseek, }; static int kvm_dev_ioctl_create_vm(unsigned long type) { int r; struct kvm *kvm; kvm = kvm_create_vm(type); if (IS_ERR(kvm)) return PTR_ERR(kvm); #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET r = kvm_coalesced_mmio_init(kvm); if (r < 0) { kvm_put_kvm(kvm); return r; } #endif r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); if (r < 0) kvm_put_kvm(kvm); return r; } static long kvm_dev_ioctl_check_extension_generic(long arg) { switch (arg) { case KVM_CAP_USER_MEMORY: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: case 
KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: #ifdef CONFIG_KVM_APIC_ARCHITECTURE case KVM_CAP_SET_BOOT_CPU_ID: #endif case KVM_CAP_INTERNAL_ERROR_DATA: #ifdef CONFIG_HAVE_KVM_MSI case KVM_CAP_SIGNAL_MSI: #endif return 1; #ifdef KVM_CAP_IRQ_ROUTING case KVM_CAP_IRQ_ROUTING: return KVM_MAX_IRQ_ROUTES; #endif default: break; } return kvm_dev_ioctl_check_extension(arg); } static long kvm_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r = -EINVAL; switch (ioctl) { case KVM_GET_API_VERSION: r = -EINVAL; if (arg) goto out; r = KVM_API_VERSION; break; case KVM_CREATE_VM: r = kvm_dev_ioctl_create_vm(arg); break; case KVM_CHECK_EXTENSION: r = kvm_dev_ioctl_check_extension_generic(arg); break; case KVM_GET_VCPU_MMAP_SIZE: r = -EINVAL; if (arg) goto out; r = PAGE_SIZE; /* struct kvm_run */ #ifdef CONFIG_X86 r += PAGE_SIZE; /* pio data page */ #endif #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET r += PAGE_SIZE; /* coalesced mmio ring page */ #endif break; case KVM_TRACE_ENABLE: case KVM_TRACE_PAUSE: case KVM_TRACE_DISABLE: r = -EOPNOTSUPP; break; default: return kvm_arch_dev_ioctl(filp, ioctl, arg); } out: return r; } static struct file_operations kvm_chardev_ops = { .unlocked_ioctl = kvm_dev_ioctl, .compat_ioctl = kvm_dev_ioctl, .llseek = noop_llseek, }; static struct miscdevice kvm_dev = { KVM_MINOR, "kvm", &kvm_chardev_ops, }; static void hardware_enable_nolock(void *junk) { int cpu = raw_smp_processor_id(); int r; if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) return; cpumask_set_cpu(cpu, cpus_hardware_enabled); r = kvm_arch_hardware_enable(NULL); if (r) { cpumask_clear_cpu(cpu, cpus_hardware_enabled); atomic_inc(&hardware_enable_failed); printk(KERN_INFO "kvm: enabling virtualization on " "CPU%d failed\n", cpu); } } static void hardware_enable(void *junk) { raw_spin_lock(&kvm_lock); hardware_enable_nolock(junk); raw_spin_unlock(&kvm_lock); } static void hardware_disable_nolock(void *junk) { int cpu = raw_smp_processor_id(); if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) return; cpumask_clear_cpu(cpu, cpus_hardware_enabled); kvm_arch_hardware_disable(NULL); } static void hardware_disable(void *junk) { raw_spin_lock(&kvm_lock); hardware_disable_nolock(junk); raw_spin_unlock(&kvm_lock); } static void hardware_disable_all_nolock(void) { BUG_ON(!kvm_usage_count); kvm_usage_count--; if (!kvm_usage_count) on_each_cpu(hardware_disable_nolock, NULL, 1); } static void hardware_disable_all(void) { raw_spin_lock(&kvm_lock); hardware_disable_all_nolock(); raw_spin_unlock(&kvm_lock); } static int hardware_enable_all(void) { int r = 0; raw_spin_lock(&kvm_lock); kvm_usage_count++; if (kvm_usage_count == 1) { atomic_set(&hardware_enable_failed, 0); on_each_cpu(hardware_enable_nolock, NULL, 1); if (atomic_read(&hardware_enable_failed)) { hardware_disable_all_nolock(); r = -EBUSY; } } raw_spin_unlock(&kvm_lock); return r; } static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, void *v) { int cpu = (long)v; if (!kvm_usage_count) return NOTIFY_OK; val &= ~CPU_TASKS_FROZEN; switch (val) { case CPU_DYING: printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", cpu); hardware_disable(NULL); break; case CPU_STARTING: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); hardware_enable(NULL); break; } return NOTIFY_OK; } asmlinkage void kvm_spurious_fault(void) { /* Fault while not rebooting. We want the trace. 
*/ BUG(); } EXPORT_SYMBOL_GPL(kvm_spurious_fault); static int kvm_reboot(struct notifier_block *notifier, unsigned long val, void *v) { /* * Some (well, at least mine) BIOSes hang on reboot if * in vmx root mode. * * And Intel TXT required VMX off for all cpu when system shutdown. */ printk(KERN_INFO "kvm: exiting hardware virtualization\n"); kvm_rebooting = true; on_each_cpu(hardware_disable_nolock, NULL, 1); return NOTIFY_OK; } static struct notifier_block kvm_reboot_notifier = { .notifier_call = kvm_reboot, .priority = 0, }; static void kvm_io_bus_destroy(struct kvm_io_bus *bus) { int i; for (i = 0; i < bus->dev_count; i++) { struct kvm_io_device *pos = bus->range[i].dev; kvm_iodevice_destructor(pos); } kfree(bus); } int kvm_io_bus_sort_cmp(const void *p1, const void *p2) { const struct kvm_io_range *r1 = p1; const struct kvm_io_range *r2 = p2; if (r1->addr < r2->addr) return -1; if (r1->addr + r1->len > r2->addr + r2->len) return 1; return 0; } int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, gpa_t addr, int len) { bus->range[bus->dev_count++] = (struct kvm_io_range) { .addr = addr, .len = len, .dev = dev, }; sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL); return 0; } int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, gpa_t addr, int len) { struct kvm_io_range *range, key; int off; key = (struct kvm_io_range) { .addr = addr, .len = len, }; range = bsearch(&key, bus->range, bus->dev_count, sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); if (range == NULL) return -ENOENT; off = range - bus->range; while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0) off--; return off; } /* kvm_io_bus_write - called under kvm->slots_lock */ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val) { int idx; struct kvm_io_bus *bus; struct kvm_io_range range; range = (struct kvm_io_range) { .addr = addr, .len = len, }; bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); idx = kvm_io_bus_get_first_dev(bus, addr, len); if (idx < 0) return -EOPNOTSUPP; while (idx < bus->dev_count && kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) { if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val)) return 0; idx++; } return -EOPNOTSUPP; } /* kvm_io_bus_read - called under kvm->slots_lock */ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, void *val) { int idx; struct kvm_io_bus *bus; struct kvm_io_range range; range = (struct kvm_io_range) { .addr = addr, .len = len, }; bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); idx = kvm_io_bus_get_first_dev(bus, addr, len); if (idx < 0) return -EOPNOTSUPP; while (idx < bus->dev_count && kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) { if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val)) return 0; idx++; } return -EOPNOTSUPP; } /* Caller must hold slots_lock. 
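* Writers below copy the bus, insert or remove the device, publish the copy with rcu_assign_pointer() and wait with synchronize_srcu_expedited() before freeing the old bus, so SRCU readers never see a half-updated range array.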
*/ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev) { struct kvm_io_bus *new_bus, *bus; bus = kvm->buses[bus_idx]; if (bus->dev_count > NR_IOBUS_DEVS - 1) return -ENOSPC; new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); if (!new_bus) return -ENOMEM; memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * sizeof(struct kvm_io_range))); kvm_io_bus_insert_dev(new_bus, dev, addr, len); rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); return 0; } /* Caller must hold slots_lock. */ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { int i, r; struct kvm_io_bus *new_bus, *bus; bus = kvm->buses[bus_idx]; r = -ENOENT; for (i = 0; i < bus->dev_count; i++) if (bus->range[i].dev == dev) { r = 0; break; } if (r) return r; new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); if (!new_bus) return -ENOMEM; memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); new_bus->dev_count--; memcpy(new_bus->range + i, bus->range + i + 1, (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); return r; } static struct notifier_block kvm_cpu_notifier = { .notifier_call = kvm_cpu_hotplug, }; static int vm_stat_get(void *_offset, u64 *val) { unsigned offset = (long)_offset; struct kvm *kvm; *val = 0; raw_spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) *val += *(u32 *)((void *)kvm + offset); raw_spin_unlock(&kvm_lock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); static int vcpu_stat_get(void *_offset, u64 *val) { unsigned offset = (long)_offset; struct kvm *kvm; struct kvm_vcpu *vcpu; int i; *val = 0; raw_spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) *val += *(u32 *)((void *)vcpu + offset); raw_spin_unlock(&kvm_lock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); static const struct file_operations *stat_fops[] = { [KVM_STAT_VCPU] = &vcpu_stat_fops, [KVM_STAT_VM] = &vm_stat_fops, }; static int kvm_init_debug(void) { int r = -EFAULT; struct kvm_stats_debugfs_item *p; kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); if (kvm_debugfs_dir == NULL) goto out; for (p = debugfs_entries; p->name; ++p) { p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, (void *)(long)p->offset, stat_fops[p->kind]); if (p->dentry == NULL) goto out_dir; } return 0; out_dir: debugfs_remove_recursive(kvm_debugfs_dir); out: return r; } static void kvm_exit_debug(void) { struct kvm_stats_debugfs_item *p; for (p = debugfs_entries; p->name; ++p) debugfs_remove(p->dentry); debugfs_remove(kvm_debugfs_dir); } static int kvm_suspend(void) { if (kvm_usage_count) hardware_disable_nolock(NULL); return 0; } static void kvm_resume(void) { if (kvm_usage_count) { WARN_ON(raw_spin_is_locked(&kvm_lock)); hardware_enable_nolock(NULL); } } static struct syscore_ops kvm_syscore_ops = { .suspend = kvm_suspend, .resume = kvm_resume, }; static inline struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) { return container_of(pn, struct kvm_vcpu, preempt_notifier); } static void kvm_sched_in(struct preempt_notifier *pn, int cpu) { struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); kvm_arch_vcpu_load(vcpu, cpu); } static void 
kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next) { struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); kvm_arch_vcpu_put(vcpu); } int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, struct module *module) { int r; int cpu; r = kvm_arch_init(opaque); if (r) goto out_fail; if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; } r = kvm_arch_hardware_setup(); if (r < 0) goto out_free_0a; for_each_online_cpu(cpu) { smp_call_function_single(cpu, kvm_arch_check_processor_compat, &r, 1); if (r < 0) goto out_free_1; } r = register_cpu_notifier(&kvm_cpu_notifier); if (r) goto out_free_2; register_reboot_notifier(&kvm_reboot_notifier); /* A kmem cache lets us meet the alignment requirements of fx_save. */ if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 0, NULL); if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; } r = kvm_async_pf_init(); if (r) goto out_free; kvm_chardev_ops.owner = module; kvm_vm_fops.owner = module; kvm_vcpu_fops.owner = module; r = misc_register(&kvm_dev); if (r) { printk(KERN_ERR "kvm: misc device register failed\n"); goto out_unreg; } register_syscore_ops(&kvm_syscore_ops); kvm_preempt_ops.sched_in = kvm_sched_in; kvm_preempt_ops.sched_out = kvm_sched_out; r = kvm_init_debug(); if (r) { printk(KERN_ERR "kvm: create debugfs files failed\n"); goto out_undebugfs; } return 0; out_undebugfs: unregister_syscore_ops(&kvm_syscore_ops); out_unreg: kvm_async_pf_deinit(); out_free: kmem_cache_destroy(kvm_vcpu_cache); out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); out_free_2: out_free_1: kvm_arch_hardware_unsetup(); out_free_0a: free_cpumask_var(cpus_hardware_enabled); out_free_0: kvm_arch_exit(); out_fail: return r; } EXPORT_SYMBOL_GPL(kvm_init); void kvm_exit(void) { kvm_exit_debug(); misc_deregister(&kvm_dev); kmem_cache_destroy(kvm_vcpu_cache); kvm_async_pf_deinit(); unregister_syscore_ops(&kvm_syscore_ops); unregister_reboot_notifier(&kvm_reboot_notifier); unregister_cpu_notifier(&kvm_cpu_notifier); on_each_cpu(hardware_disable_nolock, NULL, 1); kvm_arch_hardware_unsetup(); kvm_arch_exit(); free_cpumask_var(cpus_hardware_enabled); } EXPORT_SYMBOL_GPL(kvm_exit);
./CrossVul/dataset_final_sorted/CWE-399/c/good_5768_0
crossvul-cpp_data_bad_2392_5
/* * Copyright (c) Ian F. Darwin 1986-1995. * Software written by Ian F. Darwin and others; * maintained 1995-present by Christos Zoulas and others. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * file - find type of a file or files - main program. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: file.c,v 1.158 2014/11/28 02:35:05 christos Exp $") #endif /* lint */ #include "magic.h" #include <stdlib.h> #include <unistd.h> #include <string.h> #ifdef RESTORE_TIME # if (__COHERENT__ >= 0x420) # include <sys/utime.h> # else # ifdef USE_UTIMES # include <sys/time.h> # else # include <utime.h> # endif # endif #endif #ifdef HAVE_UNISTD_H #include <unistd.h> /* for read() */ #endif #ifdef HAVE_WCHAR_H #include <wchar.h> #endif #if defined(HAVE_GETOPT_H) && defined(HAVE_STRUCT_OPTION) #include <getopt.h> #ifndef HAVE_GETOPT_LONG int getopt_long(int argc, char * const *argv, const char *optstring, const struct option *longopts, int *longindex); #endif #else #include "mygetopt.h" #endif #ifdef S_IFLNK #define FILE_FLAGS "-bcEhikLlNnprsvz0" #else #define FILE_FLAGS "-bcEiklNnprsvz0" #endif # define USAGE \ "Usage: %s [" FILE_FLAGS \ "] [--apple] [--mime-encoding] [--mime-type]\n" \ " [-e testname] [-F separator] [-f namefile] [-m magicfiles] " \ "file ...\n" \ " %s -C [-m magicfiles]\n" \ " %s [--help]\n" private int /* Global command-line options */ bflag = 0, /* brief output format */ nopad = 0, /* Don't pad output */ nobuffer = 0, /* Do not buffer stdout */ nulsep = 0; /* Append '\0' to the separator */ private const char *separator = ":"; /* Default field separator */ private const struct option long_options[] = { #define OPT(shortname, longname, opt, doc) \ {longname, opt, NULL, shortname}, #define OPT_LONGONLY(longname, opt, doc) \ {longname, opt, NULL, 0}, #include "file_opts.h" #undef OPT #undef OPT_LONGONLY {0, 0, NULL, 0} }; #define OPTSTRING "bcCde:Ef:F:hiklLm:nNpP:rsvz0" private const struct { const char *name; int value; } nv[] = { { "apptype", MAGIC_NO_CHECK_APPTYPE }, { "ascii", MAGIC_NO_CHECK_ASCII }, { "cdf", MAGIC_NO_CHECK_CDF }, { "compress", MAGIC_NO_CHECK_COMPRESS }, { "elf", MAGIC_NO_CHECK_ELF }, { "encoding", MAGIC_NO_CHECK_ENCODING }, { "soft", MAGIC_NO_CHECK_SOFT }, { "tar", MAGIC_NO_CHECK_TAR }, { "text", 
MAGIC_NO_CHECK_TEXT }, /* synonym for ascii */ { "tokens", MAGIC_NO_CHECK_TOKENS }, /* OBSOLETE: ignored for backwards compatibility */ }; private struct { const char *name; int tag; size_t value; } pm[] = { { "indir", MAGIC_PARAM_INDIR_MAX, 0 }, { "name", MAGIC_PARAM_NAME_MAX, 0 }, { "elf_phnum", MAGIC_PARAM_ELF_PHNUM_MAX, 0 }, { "elf_shnum", MAGIC_PARAM_ELF_SHNUM_MAX, 0 }, }; private char *progname; /* used throughout */ private void usage(void); private void docprint(const char *); private void help(void); private int unwrap(struct magic_set *, const char *); private int process(struct magic_set *ms, const char *, int); private struct magic_set *load(const char *, int); private void setparam(const char *); private void applyparam(magic_t); /* * main - parse arguments and handle options */ int main(int argc, char *argv[]) { int c; size_t i; int action = 0, didsomefiles = 0, errflg = 0; int flags = 0, e = 0; struct magic_set *magic = NULL; int longindex; const char *magicfile = NULL; /* where the magic is */ /* makes islower etc work for other langs */ #ifdef HAVE_SETLOCALE (void)setlocale(LC_CTYPE, ""); #endif #ifdef __EMX__ /* sh-like wildcard expansion! Shouldn't hurt at least ... */ _wildcard(&argc, &argv); #endif if ((progname = strrchr(argv[0], '/')) != NULL) progname++; else progname = argv[0]; #ifdef S_IFLNK flags |= getenv("POSIXLY_CORRECT") ? MAGIC_SYMLINK : 0; #endif while ((c = getopt_long(argc, argv, OPTSTRING, long_options, &longindex)) != -1) switch (c) { case 0 : switch (longindex) { case 0: help(); break; case 10: flags |= MAGIC_APPLE; break; case 11: flags |= MAGIC_MIME_TYPE; break; case 12: flags |= MAGIC_MIME_ENCODING; break; } break; case '0': nulsep = 1; break; case 'b': bflag++; break; case 'c': action = FILE_CHECK; break; case 'C': action = FILE_COMPILE; break; case 'd': flags |= MAGIC_DEBUG|MAGIC_CHECK; break; case 'E': flags |= MAGIC_ERROR; break; case 'e': for (i = 0; i < sizeof(nv) / sizeof(nv[0]); i++) if (strcmp(nv[i].name, optarg) == 0) break; if (i == sizeof(nv) / sizeof(nv[0])) errflg++; else flags |= nv[i].value; break; case 'f': if(action) usage(); if (magic == NULL) if ((magic = load(magicfile, flags)) == NULL) return 1; e |= unwrap(magic, optarg); ++didsomefiles; break; case 'F': separator = optarg; break; case 'i': flags |= MAGIC_MIME; break; case 'k': flags |= MAGIC_CONTINUE; break; case 'l': action = FILE_LIST; break; case 'm': magicfile = optarg; break; case 'n': ++nobuffer; break; case 'N': ++nopad; break; #if defined(HAVE_UTIME) || defined(HAVE_UTIMES) case 'p': flags |= MAGIC_PRESERVE_ATIME; break; #endif case 'P': setparam(optarg); break; case 'r': flags |= MAGIC_RAW; break; break; case 's': flags |= MAGIC_DEVICES; break; case 'v': if (magicfile == NULL) magicfile = magic_getpath(magicfile, action); (void)fprintf(stdout, "%s-%s\n", progname, VERSION); (void)fprintf(stdout, "magic file from %s\n", magicfile); return 0; case 'z': flags |= MAGIC_COMPRESS; break; #ifdef S_IFLNK case 'L': flags |= MAGIC_SYMLINK; break; case 'h': flags &= ~MAGIC_SYMLINK; break; #endif case '?': default: errflg++; break; } if (errflg) { usage(); } if (e) return e; if (MAGIC_VERSION != magic_version()) (void)fprintf(stderr, "%s: compiled magic version [%d] " "does not match with shared library magic version [%d]\n", progname, MAGIC_VERSION, magic_version()); switch(action) { case FILE_CHECK: case FILE_COMPILE: case FILE_LIST: /* * Don't try to check/compile ~/.magic unless we explicitly * ask for it. 
*/ magic = magic_open(flags|MAGIC_CHECK); if (magic == NULL) { (void)fprintf(stderr, "%s: %s\n", progname, strerror(errno)); return 1; } switch(action) { case FILE_CHECK: c = magic_check(magic, magicfile); break; case FILE_COMPILE: c = magic_compile(magic, magicfile); break; case FILE_LIST: c = magic_list(magic, magicfile); break; default: abort(); } if (c == -1) { (void)fprintf(stderr, "%s: %s\n", progname, magic_error(magic)); return 1; } return 0; default: if (magic == NULL) if ((magic = load(magicfile, flags)) == NULL) return 1; applyparam(magic); } if (optind == argc) { if (!didsomefiles) usage(); } else { size_t j, wid, nw; for (wid = 0, j = (size_t)optind; j < (size_t)argc; j++) { nw = file_mbswidth(argv[j]); if (nw > wid) wid = nw; } /* * If bflag is only set twice, set it depending on * number of files [this is undocumented, and subject to change] */ if (bflag == 2) { bflag = optind >= argc - 1; } for (; optind < argc; optind++) e |= process(magic, argv[optind], wid); } if (magic) magic_close(magic); return e; } private void applyparam(magic_t magic) { size_t i; for (i = 0; i < __arraycount(pm); i++) { if (pm[i].value == 0) continue; if (magic_setparam(magic, pm[i].tag, &pm[i].value) == -1) { (void)fprintf(stderr, "%s: Can't set %s %s\n", progname, pm[i].name, strerror(errno)); exit(1); } } } private void setparam(const char *p) { size_t i; char *s; if ((s = strchr(p, '=')) == NULL) goto badparm; for (i = 0; i < __arraycount(pm); i++) { if (strncmp(p, pm[i].name, s - p) != 0) continue; pm[i].value = atoi(s + 1); return; } badparm: (void)fprintf(stderr, "%s: Unknown param %s\n", progname, p); exit(1); } private struct magic_set * /*ARGSUSED*/ load(const char *magicfile, int flags) { struct magic_set *magic = magic_open(flags); if (magic == NULL) { (void)fprintf(stderr, "%s: %s\n", progname, strerror(errno)); return NULL; } if (magic_load(magic, magicfile) == -1) { (void)fprintf(stderr, "%s: %s\n", progname, magic_error(magic)); magic_close(magic); return NULL; } return magic; } /* * unwrap -- read a file of filenames, do each one. */ private int unwrap(struct magic_set *ms, const char *fn) { FILE *f; ssize_t len; char *line = NULL; size_t llen = 0; int wid = 0, cwid; int e = 0; if (strcmp("-", fn) == 0) { f = stdin; wid = 1; } else { if ((f = fopen(fn, "r")) == NULL) { (void)fprintf(stderr, "%s: Cannot open `%s' (%s).\n", progname, fn, strerror(errno)); return 1; } while ((len = getline(&line, &llen, f)) > 0) { if (line[len - 1] == '\n') line[len - 1] = '\0'; cwid = file_mbswidth(line); if (cwid > wid) wid = cwid; } rewind(f); } while ((len = getline(&line, &llen, f)) > 0) { if (line[len - 1] == '\n') line[len - 1] = '\0'; e |= process(ms, line, wid); if(nobuffer) (void)fflush(stdout); } free(line); (void)fclose(f); return e; } /* * Called for each input file on the command line (or in a list of files) */ private int process(struct magic_set *ms, const char *inname, int wid) { const char *type; int std_in = strcmp(inname, "-") == 0; if (wid > 0 && !bflag) { (void)printf("%s", std_in ? "/dev/stdin" : inname); if (nulsep) (void)putc('\0', stdout); (void)printf("%s", separator); (void)printf("%*s ", (int) (nopad ? 0 : (wid - file_mbswidth(inname))), ""); } type = magic_file(ms, std_in ? 
NULL : inname); if (type == NULL) { (void)printf("ERROR: %s\n", magic_error(ms)); return 1; } else { (void)printf("%s\n", type); return 0; } } protected size_t file_mbswidth(const char *s) { #if defined(HAVE_WCHAR_H) && defined(HAVE_MBRTOWC) && defined(HAVE_WCWIDTH) size_t bytesconsumed, old_n, n, width = 0; mbstate_t state; wchar_t nextchar; (void)memset(&state, 0, sizeof(mbstate_t)); old_n = n = strlen(s); while (n > 0) { bytesconsumed = mbrtowc(&nextchar, s, n, &state); if (bytesconsumed == (size_t)(-1) || bytesconsumed == (size_t)(-2)) { /* Something went wrong, return something reasonable */ return old_n; } if (s[0] == '\n') { /* * do what strlen() would do, so that caller * is always right */ width++; } else { int w = wcwidth(nextchar); if (w > 0) width += w; } s += bytesconsumed, n -= bytesconsumed; } return width; #else return strlen(s); #endif } private void usage(void) { (void)fprintf(stderr, USAGE, progname, progname, progname); exit(1); } private void docprint(const char *opts) { size_t i; int comma; char *sp, *p; p = strstr(opts, "%o"); if (p == NULL) { fprintf(stdout, "%s", opts); return; } for (sp = p - 1; sp > opts && *sp == ' '; sp--) continue; fprintf(stdout, "%.*s", (int)(p - opts), opts); comma = 0; for (i = 0; i < __arraycount(nv); i++) { fprintf(stdout, "%s%s", comma++ ? ", " : "", nv[i].name); if (i && i % 5 == 0) { fprintf(stdout, ",\n%*s", (int)(p - sp - 1), ""); comma = 0; } } fprintf(stdout, "%s", opts + (p - opts) + 2); } private void help(void) { (void)fputs( "Usage: file [OPTION...] [FILE...]\n" "Determine type of FILEs.\n" "\n", stdout); #define OPT(shortname, longname, opt, doc) \ fprintf(stdout, " -%c, --" longname, shortname), \ docprint(doc); #define OPT_LONGONLY(longname, opt, doc) \ fprintf(stdout, " --" longname), \ docprint(doc); #include "file_opts.h" #undef OPT #undef OPT_LONGONLY fprintf(stdout, "\nReport bugs to http://bugs.gw.com/\n"); exit(0); }
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2392_5
crossvul-cpp_data_bad_3486_0
/* * Hardware performance events for the Alpha. * * We implement HW counts on the EV67 and subsequent CPUs only. * * (C) 2010 Michael J. Cree * * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and * ARM code, which are copyright by their respective authors. */ #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/kernel.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <linux/init.h> #include <asm/hwrpb.h> #include <asm/atomic.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/pal.h> #include <asm/wrperfmon.h> #include <asm/hw_irq.h> /* The maximum number of PMCs on any Alpha CPU whatsoever. */ #define MAX_HWEVENTS 3 #define PMC_NO_INDEX -1 /* For tracking PMCs and the hw events they monitor on each CPU. */ struct cpu_hw_events { int enabled; /* Number of events scheduled; also number entries valid in arrays below. */ int n_events; /* Number events added since last hw_perf_disable(). */ int n_added; /* Events currently scheduled. */ struct perf_event *event[MAX_HWEVENTS]; /* Event type of each scheduled event. */ unsigned long evtype[MAX_HWEVENTS]; /* Current index of each scheduled event; if not yet determined * contains PMC_NO_INDEX. */ int current_idx[MAX_HWEVENTS]; /* The active PMCs' config for easy use with wrperfmon(). */ unsigned long config; /* The active counters' indices for easy use with wrperfmon(). */ unsigned long idx_mask; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); /* * A structure to hold the description of the PMCs available on a particular * type of Alpha CPU. */ struct alpha_pmu_t { /* Mapping of the perf system hw event types to indigenous event types */ const int *event_map; /* The number of entries in the event_map */ int max_events; /* The number of PMCs on this Alpha */ int num_pmcs; /* * All PMC counters reside in the IBOX register PCTR. This is the * LSB of the counter. */ int pmc_count_shift[MAX_HWEVENTS]; /* * The mask that isolates the PMC bits when the LSB of the counter * is shifted to bit 0. */ unsigned long pmc_count_mask[MAX_HWEVENTS]; /* The maximum period the PMC can count. */ unsigned long pmc_max_period[MAX_HWEVENTS]; /* * The maximum value that may be written to the counter due to * hardware restrictions is pmc_max_period - pmc_left. */ long pmc_left[3]; /* Subroutine for allocation of PMCs. Enforces constraints. */ int (*check_constraints)(struct perf_event **, unsigned long *, int); }; /* * The Alpha CPU PMU description currently in operation. This is set during * the boot process to the specific CPU of the machine. */ static const struct alpha_pmu_t *alpha_pmu; #define HW_OP_UNSUPPORTED -1 /* * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs * follow. Since they are identical we refer to them collectively as the * EV67 henceforth. */ /* * EV67 PMC event types * * There is no one-to-one mapping of the possible hw event types to the * actual codes that are used to program the PMCs hence we introduce our * own hw event type identifiers. 
*/ enum ev67_pmc_event_type { EV67_CYCLES = 1, EV67_INSTRUCTIONS, EV67_BCACHEMISS, EV67_MBOXREPLAY, EV67_LAST_ET }; #define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES) /* Mapping of the hw event types to the perf tool interface */ static const int ev67_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS, [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS, }; struct ev67_mapping_t { int config; int idx; }; /* * The mapping used for one event only - these must be in same order as enum * ev67_pmc_event_type definition. */ static const struct ev67_mapping_t ev67_mapping[] = { {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */ {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */ {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */ {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */ }; /* * Check that a group of events can be simultaneously scheduled on to the * EV67 PMU. Also allocate counter indices and config. */ static int ev67_check_constraints(struct perf_event **event, unsigned long *evtype, int n_ev) { int idx0; unsigned long config; idx0 = ev67_mapping[evtype[0]-1].idx; config = ev67_mapping[evtype[0]-1].config; if (n_ev == 1) goto success; BUG_ON(n_ev != 2); if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) { /* MBOX replay traps must be on PMC 1 */ idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0; /* Only cycles can accompany MBOX replay traps */ if (evtype[idx0] == EV67_CYCLES) { config = EV67_PCTR_CYCLES_MBOX; goto success; } } if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) { /* Bcache misses must be on PMC 1 */ idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0; /* Only instructions can accompany Bcache misses */ if (evtype[idx0] == EV67_INSTRUCTIONS) { config = EV67_PCTR_INSTR_BCACHEMISS; goto success; } } if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) { /* Instructions must be on PMC 0 */ idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1; /* By this point only cycles can accompany instructions */ if (evtype[idx0^1] == EV67_CYCLES) { config = EV67_PCTR_INSTR_CYCLES; goto success; } } /* Otherwise, darn it, there is a conflict. */ return -1; success: event[0]->hw.idx = idx0; event[0]->hw.config_base = config; if (n_ev == 2) { event[1]->hw.idx = idx0 ^ 1; event[1]->hw.config_base = config; } return 0; } static const struct alpha_pmu_t ev67_pmu = { .event_map = ev67_perfmon_event_map, .max_events = ARRAY_SIZE(ev67_perfmon_event_map), .num_pmcs = 2, .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0}, .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, .pmc_left = {16, 4, 0}, .check_constraints = ev67_check_constraints }; /* * Helper routines to ensure that we read/write only the correct PMC bits * when calling the wrperfmon PALcall. 
*/ static inline void alpha_write_pmc(int idx, unsigned long val) { val &= alpha_pmu->pmc_count_mask[idx]; val <<= alpha_pmu->pmc_count_shift[idx]; val |= (1<<idx); wrperfmon(PERFMON_CMD_WRITE, val); } static inline unsigned long alpha_read_pmc(int idx) { unsigned long val; val = wrperfmon(PERFMON_CMD_READ, 0); val >>= alpha_pmu->pmc_count_shift[idx]; val &= alpha_pmu->pmc_count_mask[idx]; return val; } /* Set a new period to sample over */ static int alpha_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { long left = local64_read(&hwc->period_left); long period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } /* * Hardware restrictions require that the counters must not be * written with values that are too close to the maximum period. */ if (unlikely(left < alpha_pmu->pmc_left[idx])) left = alpha_pmu->pmc_left[idx]; if (left > (long)alpha_pmu->pmc_max_period[idx]) left = alpha_pmu->pmc_max_period[idx]; local64_set(&hwc->prev_count, (unsigned long)(-left)); alpha_write_pmc(idx, (unsigned long)(-left)); perf_event_update_userpage(event); return ret; } /* * Calculates the count (the 'delta') since the last time the PMC was read. * * As the PMCs' full period can easily be exceeded within the perf system * sampling period we cannot use any high order bits as a guard bit in the * PMCs to detect overflow as is done by other architectures. The code here * calculates the delta on the basis that there is no overflow when ovf is * zero. The value passed via ovf by the interrupt handler corrects for * overflow. * * This can be racey on rare occasions -- a call to this routine can occur * with an overflowed counter just before the PMI service routine is called. * The check for delta negative hopefully always rectifies this situation. */ static unsigned long alpha_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx, long ovf) { long prev_raw_count, new_raw_count; long delta; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = alpha_read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; /* It is possible on very rare occasions that the PMC has overflowed * but the interrupt is yet to come. Detect and fix this situation. */ if (unlikely(delta < 0)) { delta += alpha_pmu->pmc_max_period[idx] + 1; } local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } /* * Collect all HW events into the array event[]. */ static int collect_events(struct perf_event *group, int max_count, struct perf_event *event[], unsigned long *evtype, int *current_idx) { struct perf_event *pe; int n = 0; if (!is_software_event(group)) { if (n >= max_count) return -1; event[n] = group; evtype[n] = group->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } list_for_each_entry(pe, &group->sibling_list, group_entry) { if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; event[n] = pe; evtype[n] = pe->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } } return n; } /* * Check that a group of events can be simultaneously scheduled on to the PMU. 
*/ static int alpha_check_constraints(struct perf_event **events, unsigned long *evtypes, int n_ev) { /* No HW events is possible from hw_perf_group_sched_in(). */ if (n_ev == 0) return 0; if (n_ev > alpha_pmu->num_pmcs) return -1; return alpha_pmu->check_constraints(events, evtypes, n_ev); } /* * If new events have been scheduled then update cpuc with the new * configuration. This may involve shifting cycle counts from one PMC to * another. */ static void maybe_change_configuration(struct cpu_hw_events *cpuc) { int j; if (cpuc->n_added == 0) return; /* Find counters that are moving to another PMC and update */ for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; if (cpuc->current_idx[j] != PMC_NO_INDEX && cpuc->current_idx[j] != pe->hw.idx) { alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); cpuc->current_idx[j] = PMC_NO_INDEX; } } /* Assign to counters all unassigned events. */ cpuc->idx_mask = 0; for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; struct hw_perf_event *hwc = &pe->hw; int idx = hwc->idx; if (cpuc->current_idx[j] == PMC_NO_INDEX) { alpha_perf_event_set_period(pe, hwc, idx); cpuc->current_idx[j] = idx; } if (!(hwc->state & PERF_HES_STOPPED)) cpuc->idx_mask |= (1<<cpuc->current_idx[j]); } cpuc->config = cpuc->event[0]->hw.config_base; } /* Schedule perf HW event on to PMU. * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. */ static int alpha_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int n0; int ret; unsigned long irq_flags; /* * The Sparc code has the IRQ disable first followed by the perf * disable, however this can lead to an overflowed counter with the * PMI disabled on rare occasions. The alpha_perf_event_update() * routine should detect this situation by noting a negative delta, * nevertheless we disable the PMCs first to enable a potential * final PMI to occur before we disable interrupts. */ perf_pmu_disable(event->pmu); local_irq_save(irq_flags); /* Default to error to be returned */ ret = -EAGAIN; /* Insert event on to PMU and if successful modify ret to valid return */ n0 = cpuc->n_events; if (n0 < alpha_pmu->num_pmcs) { cpuc->event[n0] = event; cpuc->evtype[n0] = event->hw.event_base; cpuc->current_idx[n0] = PMC_NO_INDEX; if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { cpuc->n_events++; cpuc->n_added++; ret = 0; } } hwc->state = PERF_HES_UPTODATE; if (!(flags & PERF_EF_START)) hwc->state |= PERF_HES_STOPPED; local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); return ret; } /* Disable performance monitoring unit * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. */ static void alpha_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; unsigned long irq_flags; int j; perf_pmu_disable(event->pmu); local_irq_save(irq_flags); for (j = 0; j < cpuc->n_events; j++) { if (event == cpuc->event[j]) { int idx = cpuc->current_idx[j]; /* Shift remaining entries down into the existing * slot. */ while (++j < cpuc->n_events) { cpuc->event[j - 1] = cpuc->event[j]; cpuc->evtype[j - 1] = cpuc->evtype[j]; cpuc->current_idx[j - 1] = cpuc->current_idx[j]; } /* Absorb the final count and turn off the event. 
*/ alpha_perf_event_update(event, hwc, idx, 0); perf_event_update_userpage(event); cpuc->idx_mask &= ~(1UL<<idx); cpuc->n_events--; break; } } local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); } static void alpha_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; alpha_perf_event_update(event, hwc, hwc->idx, 0); } static void alpha_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!(hwc->state & PERF_HES_STOPPED)) { cpuc->idx_mask &= ~(1UL<<hwc->idx); hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { alpha_perf_event_update(event, hwc, hwc->idx, 0); hwc->state |= PERF_HES_UPTODATE; } if (cpuc->enabled) wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx)); } static void alpha_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); alpha_perf_event_set_period(event, hwc, hwc->idx); } hwc->state = 0; cpuc->idx_mask |= 1UL<<hwc->idx; if (cpuc->enabled) wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); } /* * Check that CPU performance counters are supported. * - currently support EV67 and later CPUs. * - actually some later revisions of the EV6 have the same PMC model as the * EV67 but we don't do suffiently deep CPU detection to detect them. * Bad luck to the very few people who might have one, I guess. */ static int supported_cpu(void) { struct percpu_struct *cpu; unsigned long cputype; /* Get cpu type from HW */ cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); cputype = cpu->type & 0xffffffff; /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */ return (cputype >= EV67_CPU) && (cputype <= EV69_CPU); } static void hw_perf_event_destroy(struct perf_event *event) { /* Nothing to be done! */ return; } static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; struct perf_event *evts[MAX_HWEVENTS]; unsigned long evtypes[MAX_HWEVENTS]; int idx_rubbish_bin[MAX_HWEVENTS]; int ev; int n; /* We only support a limited range of HARDWARE event types with one * only programmable via a RAW event type. */ if (attr->type == PERF_TYPE_HARDWARE) { if (attr->config >= alpha_pmu->max_events) return -EINVAL; ev = alpha_pmu->event_map[attr->config]; } else if (attr->type == PERF_TYPE_HW_CACHE) { return -EOPNOTSUPP; } else if (attr->type == PERF_TYPE_RAW) { ev = attr->config & 0xff; } else { return -EOPNOTSUPP; } if (ev < 0) { return ev; } /* The EV67 does not support mode exclusion */ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv || attr->exclude_idle) { return -EPERM; } /* * We place the event type in event_base here and leave calculation * of the codes to programme the PMU for alpha_pmu_enable() because * it is only then we will know what HW events are actually * scheduled on to the PMU. At that point the code to programme the * PMU is put into config_base and the PMC to use is placed into * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that * it is yet to be determined. */ hwc->event_base = ev; /* Collect events in a group together suitable for calling * alpha_check_constraints() to verify that the group as a whole can * be scheduled on to the PMU. 
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, alpha_pmu->num_pmcs - 1, evts, evtypes, idx_rubbish_bin); if (n < 0) return -EINVAL; } evtypes[n] = hwc->event_base; evts[n] = event; if (alpha_check_constraints(evts, evtypes, n + 1)) return -EINVAL; /* Indicate that PMU config and idx are yet to be determined. */ hwc->config_base = 0; hwc->idx = PMC_NO_INDEX; event->destroy = hw_perf_event_destroy; /* * Most architectures reserve the PMU for their use at this point. * As there is no existing mechanism to arbitrate usage and there * appears to be no other user of the Alpha PMU we just assume * that we can just use it, hence a NO-OP here. * * Maybe an alpha_reserve_pmu() routine should be implemented but is * anything else ever going to use it? */ if (!hwc->sample_period) { hwc->sample_period = alpha_pmu->pmc_max_period[0]; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } return 0; } /* * Main entry point to initialise a HW performance event. */ static int alpha_pmu_event_init(struct perf_event *event) { int err; switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break; default: return -ENOENT; } if (!alpha_pmu) return -ENODEV; /* Do the real initialisation work. */ err = __hw_perf_event_init(event); return err; } /* * Main entry point - enable HW performance counters. */ static void alpha_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) return; cpuc->enabled = 1; barrier(); if (cpuc->n_events > 0) { /* Update cpuc with information from any new scheduled events. */ maybe_change_configuration(cpuc); /* Start counting the desired events. */ wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE); wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); } } /* * Main entry point - disable HW performance counters. */ static void alpha_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!cpuc->enabled) return; cpuc->enabled = 0; cpuc->n_added = 0; wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); } static struct pmu pmu = { .pmu_enable = alpha_pmu_enable, .pmu_disable = alpha_pmu_disable, .event_init = alpha_pmu_event_init, .add = alpha_pmu_add, .del = alpha_pmu_del, .start = alpha_pmu_start, .stop = alpha_pmu_stop, .read = alpha_pmu_read, }; /* * Main entry point - don't know when this is called but it * obviously dumps debug info. */ void perf_event_print_debug(void) { unsigned long flags; unsigned long pcr; int pcr0, pcr1; int cpu; if (!supported_cpu()) return; local_irq_save(flags); cpu = smp_processor_id(); pcr = wrperfmon(PERFMON_CMD_READ, 0); pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0]; pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1]; pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1); local_irq_restore(flags); } /* * Performance Monitoring Interrupt Service Routine called when a PMC * overflows. The PMC that overflowed is passed in la_ptr. 
*/ static void alpha_perf_event_irq_handler(unsigned long la_ptr, struct pt_regs *regs) { struct cpu_hw_events *cpuc; struct perf_sample_data data; struct perf_event *event; struct hw_perf_event *hwc; int idx, j; __get_cpu_var(irq_pmi_count)++; cpuc = &__get_cpu_var(cpu_hw_events); /* Completely counting through the PMC's period to trigger a new PMC * overflow interrupt while in this interrupt routine is utterly * disastrous! The EV6 and EV67 counters are sufficiently large to * prevent this but to be really sure disable the PMCs. */ wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); /* la_ptr is the counter that overflowed. */ if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) { /* This should never occur! */ irq_err_count++; pr_warning("PMI: silly index %ld\n", la_ptr); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } idx = la_ptr; perf_sample_data_init(&data, 0); for (j = 0; j < cpuc->n_events; j++) { if (cpuc->current_idx[j] == idx) break; } if (unlikely(j == cpuc->n_events)) { /* This can occur if the event is disabled right on a PMC overflow. */ wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } event = cpuc->event[j]; if (unlikely(!event)) { /* This should never occur! */ irq_err_count++; pr_warning("PMI: No event at index %d!\n", idx); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } hwc = &event->hw; alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); data.period = event->hw.last_period; if (alpha_perf_event_set_period(event, hwc, idx)) { if (perf_event_overflow(event, 1, &data, regs)) { /* Interrupts coming too quickly; "throttle" the * counter, i.e., disable it for a little while. */ alpha_pmu_stop(event, 0); } } wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } /* * Init call to initialise performance events at kernel startup. */ int __init init_hw_perf_events(void) { pr_info("Performance events: "); if (!supported_cpu()) { pr_cont("No support for your CPU.\n"); return 0; } pr_cont("Supported CPU type!\n"); /* Override performance counter IRQ vector */ perf_irq = alpha_perf_event_irq_handler; /* And set up PMU specification */ alpha_pmu = &ev67_pmu; perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); return 0; } early_initcall(init_hw_perf_events);
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_0
crossvul-cpp_data_good_2262_0
/* * linux/fs/isofs/inode.c * * (C) 1991 Linus Torvalds - minix filesystem * 1992, 1993, 1994 Eric Youngdale Modified for ISO 9660 filesystem. * 1994 Eberhard Mönkeberg - multi session handling. * 1995 Mark Dobie - allow mounting of some weird VideoCDs and PhotoCDs. * 1997 Gordon Chaffee - Joliet CDs * 1998 Eric Lammerts - ISO 9660 Level 3 * 2004 Paul Serice - Inode Support pushed out from 4GB to 128GB * 2004 Paul Serice - NFS Export Operations */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/nls.h> #include <linux/ctype.h> #include <linux/statfs.h> #include <linux/cdrom.h> #include <linux/parser.h> #include <linux/mpage.h> #include <linux/user_namespace.h> #include "isofs.h" #include "zisofs.h" #define BEQUIET static int isofs_hashi(const struct dentry *parent, struct qstr *qstr); static int isofs_hash(const struct dentry *parent, struct qstr *qstr); static int isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); static int isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); #ifdef CONFIG_JOLIET static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr); static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr); static int isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); static int isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); #endif static void isofs_put_super(struct super_block *sb) { struct isofs_sb_info *sbi = ISOFS_SB(sb); #ifdef CONFIG_JOLIET unload_nls(sbi->s_nls_iocharset); #endif kfree(sbi); sb->s_fs_info = NULL; return; } static int isofs_read_inode(struct inode *, int relocated); static int isofs_statfs (struct dentry *, struct kstatfs *); static struct kmem_cache *isofs_inode_cachep; static struct inode *isofs_alloc_inode(struct super_block *sb) { struct iso_inode_info *ei; ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void isofs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); } static void isofs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, isofs_i_callback); } static void init_once(void *foo) { struct iso_inode_info *ei = foo; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { isofs_inode_cachep = kmem_cache_create("isofs_inode_cache", sizeof(struct iso_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (isofs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(isofs_inode_cachep); } static int isofs_remount(struct super_block *sb, int *flags, char *data) { sync_filesystem(sb); if (!(*flags & MS_RDONLY)) return -EROFS; return 0; } static const struct super_operations isofs_sops = { .alloc_inode = isofs_alloc_inode, .destroy_inode = isofs_destroy_inode, .put_super = isofs_put_super, .statfs = isofs_statfs, .remount_fs = isofs_remount, .show_options = generic_show_options, }; static const struct dentry_operations isofs_dentry_ops[] = { { .d_hash = isofs_hash, .d_compare = isofs_dentry_cmp, }, { .d_hash = isofs_hashi, .d_compare = isofs_dentry_cmpi, }, #ifdef CONFIG_JOLIET { .d_hash = isofs_hash_ms, .d_compare = isofs_dentry_cmp_ms, }, { .d_hash = isofs_hashi_ms, .d_compare = isofs_dentry_cmpi_ms, }, #endif }; struct iso9660_options{ unsigned int rock:1; unsigned int joliet:1; unsigned int cruft:1; unsigned int hide:1; unsigned int showassoc:1; unsigned int nocompress:1; unsigned int overriderockperm:1; unsigned int uid_set:1; unsigned int gid_set:1; unsigned int utf8:1; unsigned char map; unsigned char check; unsigned int blocksize; umode_t fmode; umode_t dmode; kgid_t gid; kuid_t uid; char *iocharset; /* LVE */ s32 session; s32 sbsector; }; /* * Compute the hash for the isofs name corresponding to the dentry. */ static int isofs_hash_common(struct qstr *qstr, int ms) { const char *name; int len; len = qstr->len; name = qstr->name; if (ms) { while (len && name[len-1] == '.') len--; } qstr->hash = full_name_hash(name, len); return 0; } /* * Compute the hash for the isofs name corresponding to the dentry. */ static int isofs_hashi_common(struct qstr *qstr, int ms) { const char *name; int len; char c; unsigned long hash; len = qstr->len; name = qstr->name; if (ms) { while (len && name[len-1] == '.') len--; } hash = init_name_hash(); while (len--) { c = tolower(*name++); hash = partial_name_hash(c, hash); } qstr->hash = end_name_hash(hash); return 0; } /* * Compare of two isofs names. */ static int isofs_dentry_cmp_common( unsigned int len, const char *str, const struct qstr *name, int ms, int ci) { int alen, blen; /* A filename cannot end in '.' 
or we treat it like it has none */ alen = name->len; blen = len; if (ms) { while (alen && name->name[alen-1] == '.') alen--; while (blen && str[blen-1] == '.') blen--; } if (alen == blen) { if (ci) { if (strnicmp(name->name, str, alen) == 0) return 0; } else { if (strncmp(name->name, str, alen) == 0) return 0; } } return 1; } static int isofs_hash(const struct dentry *dentry, struct qstr *qstr) { return isofs_hash_common(qstr, 0); } static int isofs_hashi(const struct dentry *dentry, struct qstr *qstr) { return isofs_hashi_common(qstr, 0); } static int isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 0, 0); } static int isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 0, 1); } #ifdef CONFIG_JOLIET static int isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) { return isofs_hash_common(qstr, 1); } static int isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr) { return isofs_hashi_common(qstr, 1); } static int isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 1, 0); } static int isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return isofs_dentry_cmp_common(len, str, name, 1, 1); } #endif enum { Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore, Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet, Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err, Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, Opt_overriderockperm, }; static const match_table_t tokens = { {Opt_norock, "norock"}, {Opt_nojoliet, "nojoliet"}, {Opt_unhide, "unhide"}, {Opt_hide, "hide"}, {Opt_showassoc, "showassoc"}, {Opt_cruft, "cruft"}, {Opt_utf8, "utf8"}, {Opt_iocharset, "iocharset=%s"}, {Opt_map_a, "map=acorn"}, {Opt_map_a, "map=a"}, {Opt_map_n, "map=normal"}, {Opt_map_n, "map=n"}, {Opt_map_o, "map=off"}, {Opt_map_o, "map=o"}, {Opt_session, "session=%u"}, {Opt_sb, "sbsector=%u"}, {Opt_check_r, "check=relaxed"}, {Opt_check_r, "check=r"}, {Opt_check_s, "check=strict"}, {Opt_check_s, "check=s"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%u"}, {Opt_dmode, "dmode=%u"}, {Opt_overriderockperm, "overriderockperm"}, {Opt_block, "block=%u"}, {Opt_ignore, "conv=binary"}, {Opt_ignore, "conv=b"}, {Opt_ignore, "conv=text"}, {Opt_ignore, "conv=t"}, {Opt_ignore, "conv=mtext"}, {Opt_ignore, "conv=m"}, {Opt_ignore, "conv=auto"}, {Opt_ignore, "conv=a"}, {Opt_nocompress, "nocompress"}, {Opt_err, NULL} }; static int parse_options(char *options, struct iso9660_options *popt) { char *p; int option; popt->map = 'n'; popt->rock = 1; popt->joliet = 1; popt->cruft = 0; popt->hide = 0; popt->showassoc = 0; popt->check = 'u'; /* unset */ popt->nocompress = 0; popt->blocksize = 1024; popt->fmode = popt->dmode = ISOFS_INVALID_MODE; popt->uid_set = 0; popt->gid_set = 0; popt->gid = GLOBAL_ROOT_GID; popt->uid = GLOBAL_ROOT_UID; popt->iocharset = NULL; popt->utf8 = 0; popt->overriderockperm = 0; popt->session=-1; popt->sbsector=-1; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; substring_t args[MAX_OPT_ARGS]; unsigned n; if (!*p) 
continue; token = match_token(p, tokens, args); switch (token) { case Opt_norock: popt->rock = 0; break; case Opt_nojoliet: popt->joliet = 0; break; case Opt_hide: popt->hide = 1; break; case Opt_unhide: case Opt_showassoc: popt->showassoc = 1; break; case Opt_cruft: popt->cruft = 1; break; case Opt_utf8: popt->utf8 = 1; break; #ifdef CONFIG_JOLIET case Opt_iocharset: popt->iocharset = match_strdup(&args[0]); break; #endif case Opt_map_a: popt->map = 'a'; break; case Opt_map_o: popt->map = 'o'; break; case Opt_map_n: popt->map = 'n'; break; case Opt_session: if (match_int(&args[0], &option)) return 0; n = option; if (n > 99) return 0; popt->session = n + 1; break; case Opt_sb: if (match_int(&args[0], &option)) return 0; popt->sbsector = option; break; case Opt_check_r: popt->check = 'r'; break; case Opt_check_s: popt->check = 's'; break; case Opt_ignore: break; case Opt_uid: if (match_int(&args[0], &option)) return 0; popt->uid = make_kuid(current_user_ns(), option); if (!uid_valid(popt->uid)) return 0; popt->uid_set = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; popt->gid = make_kgid(current_user_ns(), option); if (!gid_valid(popt->gid)) return 0; popt->gid_set = 1; break; case Opt_mode: if (match_int(&args[0], &option)) return 0; popt->fmode = option; break; case Opt_dmode: if (match_int(&args[0], &option)) return 0; popt->dmode = option; break; case Opt_overriderockperm: popt->overriderockperm = 1; break; case Opt_block: if (match_int(&args[0], &option)) return 0; n = option; if (n != 512 && n != 1024 && n != 2048) return 0; popt->blocksize = n; break; case Opt_nocompress: popt->nocompress = 1; break; default: return 0; } } return 1; } /* * look if the driver can tell the multi session redirection value * * don't change this if you don't know what you do, please! * Multisession is legal only with XA disks. * A non-XA disk with more than one volume descriptor may do it right, but * usually is written in a nowhere standardized "multi-partition" manner. * Multisession uses absolute addressing (solely the first frame of the whole * track is #0), multi-partition uses relative addressing (each first frame of * each track is #0), and a track is not a session. * * A broken CDwriter software or drive firmware does not set new standards, * at least not if conflicting with the existing ones. 
* * emoenke@gwdg.de */ #define WE_OBEY_THE_WRITTEN_STANDARDS 1 static unsigned int isofs_get_last_session(struct super_block *sb, s32 session) { struct cdrom_multisession ms_info; unsigned int vol_desc_start; struct block_device *bdev = sb->s_bdev; int i; vol_desc_start=0; ms_info.addr_format=CDROM_LBA; if(session >= 0 && session <= 99) { struct cdrom_tocentry Te; Te.cdte_track=session; Te.cdte_format=CDROM_LBA; i = ioctl_by_bdev(bdev, CDROMREADTOCENTRY, (unsigned long) &Te); if (!i) { printk(KERN_DEBUG "ISOFS: Session %d start %d type %d\n", session, Te.cdte_addr.lba, Te.cdte_ctrl&CDROM_DATA_TRACK); if ((Te.cdte_ctrl&CDROM_DATA_TRACK) == 4) return Te.cdte_addr.lba; } printk(KERN_ERR "ISOFS: Invalid session number or type of track\n"); } i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info); if (session > 0) printk(KERN_ERR "ISOFS: Invalid session number\n"); #if 0 printk(KERN_DEBUG "isofs.inode: CDROMMULTISESSION: rc=%d\n",i); if (i==0) { printk(KERN_DEBUG "isofs.inode: XA disk: %s\n",ms_info.xa_flag?"yes":"no"); printk(KERN_DEBUG "isofs.inode: vol_desc_start = %d\n", ms_info.addr.lba); } #endif if (i==0) #if WE_OBEY_THE_WRITTEN_STANDARDS if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */ #endif vol_desc_start=ms_info.addr.lba; return vol_desc_start; } /* * Check if root directory is empty (has less than 3 files). * * Used to detect broken CDs where ISO root directory is empty but Joliet root * directory is OK. If such CD has Rock Ridge extensions, they will be disabled * (and Joliet used instead) or else no files would be visible. */ static bool rootdir_empty(struct super_block *sb, unsigned long block) { int offset = 0, files = 0, de_len; struct iso_directory_record *de; struct buffer_head *bh; bh = sb_bread(sb, block); if (!bh) return true; while (files < 3) { de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; if (de_len == 0) break; files++; offset += de_len; } brelse(bh); return files < 3; } /* * Initialize the superblock and read the root inode. * * Note: a check_disk_change() has been done immediately prior * to this call, so we don't need to check again. */ static int isofs_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh = NULL, *pri_bh = NULL; struct hs_primary_descriptor *h_pri = NULL; struct iso_primary_descriptor *pri = NULL; struct iso_supplementary_descriptor *sec = NULL; struct iso_directory_record *rootp; struct inode *inode; struct iso9660_options opt; struct isofs_sb_info *sbi; unsigned long first_data_zone; int joliet_level = 0; int iso_blknum, block; int orig_zonesize; int table, error = -EINVAL; unsigned int vol_desc_start; save_mount_options(s, data); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; s->s_fs_info = sbi; if (!parse_options((char *)data, &opt)) goto out_freesbi; /* * First of all, get the hardware blocksize for this device. * If we don't know what it is, or the hardware blocksize is * larger than the blocksize the user specified, then use * that value. */ /* * What if bugger tells us to go beyond page size? */ opt.blocksize = sb_min_blocksize(s, opt.blocksize); sbi->s_high_sierra = 0; /* default is iso9660 */ vol_desc_start = (opt.sbsector != -1) ? 
opt.sbsector : isofs_get_last_session(s,opt.session); for (iso_blknum = vol_desc_start+16; iso_blknum < vol_desc_start+100; iso_blknum++) { struct hs_volume_descriptor *hdp; struct iso_volume_descriptor *vdp; block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits); if (!(bh = sb_bread(s, block))) goto out_no_read; vdp = (struct iso_volume_descriptor *)bh->b_data; hdp = (struct hs_volume_descriptor *)bh->b_data; /* * Due to the overlapping physical location of the descriptors, * ISO CDs can match hdp->id==HS_STANDARD_ID as well. To ensure * proper identification in this case, we first check for ISO. */ if (strncmp (vdp->id, ISO_STANDARD_ID, sizeof vdp->id) == 0) { if (isonum_711(vdp->type) == ISO_VD_END) break; if (isonum_711(vdp->type) == ISO_VD_PRIMARY) { if (pri == NULL) { pri = (struct iso_primary_descriptor *)vdp; /* Save the buffer in case we need it ... */ pri_bh = bh; bh = NULL; } } #ifdef CONFIG_JOLIET else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) { sec = (struct iso_supplementary_descriptor *)vdp; if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) { if (opt.joliet) { if (sec->escape[2] == 0x40) joliet_level = 1; else if (sec->escape[2] == 0x43) joliet_level = 2; else if (sec->escape[2] == 0x45) joliet_level = 3; printk(KERN_DEBUG "ISO 9660 Extensions: " "Microsoft Joliet Level %d\n", joliet_level); } goto root_found; } else { /* Unknown supplementary volume descriptor */ sec = NULL; } } #endif } else { if (strncmp (hdp->id, HS_STANDARD_ID, sizeof hdp->id) == 0) { if (isonum_711(hdp->type) != ISO_VD_PRIMARY) goto out_freebh; sbi->s_high_sierra = 1; opt.rock = 0; h_pri = (struct hs_primary_descriptor *)vdp; goto root_found; } } /* Just skip any volume descriptors we don't recognize */ brelse(bh); bh = NULL; } /* * If we fall through, either no volume descriptor was found, * or else we passed a primary descriptor looking for others. */ if (!pri) goto out_unknown_format; brelse(bh); bh = pri_bh; pri_bh = NULL; root_found: if (joliet_level && (pri == NULL || !opt.rock)) { /* This is the case of Joliet with the norock mount flag. * A disc with both Joliet and Rock Ridge is handled later */ pri = (struct iso_primary_descriptor *) sec; } if(sbi->s_high_sierra){ rootp = (struct iso_directory_record *) h_pri->root_directory_record; sbi->s_nzones = isonum_733(h_pri->volume_space_size); sbi->s_log_zone_size = isonum_723(h_pri->logical_block_size); sbi->s_max_size = isonum_733(h_pri->volume_space_size); } else { if (!pri) goto out_freebh; rootp = (struct iso_directory_record *) pri->root_directory_record; sbi->s_nzones = isonum_733(pri->volume_space_size); sbi->s_log_zone_size = isonum_723(pri->logical_block_size); sbi->s_max_size = isonum_733(pri->volume_space_size); } sbi->s_ninodes = 0; /* No way to figure this out easily */ orig_zonesize = sbi->s_log_zone_size; /* * If the zone size is smaller than the hardware sector size, * this is a fatal error. This would occur if the disc drive * had sectors that were 2048 bytes, but the filesystem had * blocks that were 512 bytes (which should only very rarely * happen.) */ if (orig_zonesize < opt.blocksize) goto out_bad_size; /* RDE: convert log zone size to bit shift */ switch (sbi->s_log_zone_size) { case 512: sbi->s_log_zone_size = 9; break; case 1024: sbi->s_log_zone_size = 10; break; case 2048: sbi->s_log_zone_size = 11; break; default: goto out_bad_zone_size; } s->s_magic = ISOFS_SUPER_MAGIC; /* * With multi-extent files, file size is only limited by the maximum * size of a file system, which is 8 TB. 
*/ s->s_maxbytes = 0x80000000000LL; /* Set this for reference. Its not currently used except on write which we don't have .. */ first_data_zone = isonum_733(rootp->extent) + isonum_711(rootp->ext_attr_length); sbi->s_firstdatazone = first_data_zone; #ifndef BEQUIET printk(KERN_DEBUG "ISOFS: Max size:%ld Log zone size:%ld\n", sbi->s_max_size, 1UL << sbi->s_log_zone_size); printk(KERN_DEBUG "ISOFS: First datazone:%ld\n", sbi->s_firstdatazone); if(sbi->s_high_sierra) printk(KERN_DEBUG "ISOFS: Disc in High Sierra format.\n"); #endif /* * If the Joliet level is set, we _may_ decide to use the * secondary descriptor, but can't be sure until after we * read the root inode. But before reading the root inode * we may need to change the device blocksize, and would * rather release the old buffer first. So, we cache the * first_data_zone value from the secondary descriptor. */ if (joliet_level) { pri = (struct iso_primary_descriptor *) sec; rootp = (struct iso_directory_record *) pri->root_directory_record; first_data_zone = isonum_733(rootp->extent) + isonum_711(rootp->ext_attr_length); } /* * We're all done using the volume descriptor, and may need * to change the device blocksize, so release the buffer now. */ brelse(pri_bh); brelse(bh); /* * Force the blocksize to 512 for 512 byte sectors. The file * read primitives really get it wrong in a bad way if we don't * do this. * * Note - we should never be setting the blocksize to something * less than the hardware sector size for the device. If we * do, we would end up having to read larger buffers and split * out portions to satisfy requests. * * Note2- the idea here is that we want to deal with the optimal * zonesize in the filesystem. If we have it set to something less, * then we have horrible problems with trying to piece together * bits of adjacent blocks in order to properly read directory * entries. By forcing the blocksize in this way, we ensure * that we will never be required to do this. */ sb_set_blocksize(s, orig_zonesize); sbi->s_nls_iocharset = NULL; #ifdef CONFIG_JOLIET if (joliet_level && opt.utf8 == 0) { char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT; sbi->s_nls_iocharset = load_nls(p); if (! sbi->s_nls_iocharset) { /* Fail only if explicit charset specified */ if (opt.iocharset) goto out_freesbi; sbi->s_nls_iocharset = load_nls_default(); } } #endif s->s_op = &isofs_sops; s->s_export_op = &isofs_export_ops; sbi->s_mapping = opt.map; sbi->s_rock = (opt.rock ? 2 : 0); sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/ sbi->s_cruft = opt.cruft; sbi->s_hide = opt.hide; sbi->s_showassoc = opt.showassoc; sbi->s_uid = opt.uid; sbi->s_gid = opt.gid; sbi->s_uid_set = opt.uid_set; sbi->s_gid_set = opt.gid_set; sbi->s_utf8 = opt.utf8; sbi->s_nocompress = opt.nocompress; sbi->s_overriderockperm = opt.overriderockperm; /* * It would be incredibly stupid to allow people to mark every file * on the disk as suid, so we merely allow them to set the default * permissions. */ if (opt.fmode != ISOFS_INVALID_MODE) sbi->s_fmode = opt.fmode & 0777; else sbi->s_fmode = ISOFS_INVALID_MODE; if (opt.dmode != ISOFS_INVALID_MODE) sbi->s_dmode = opt.dmode & 0777; else sbi->s_dmode = ISOFS_INVALID_MODE; /* * Read the root inode, which _may_ result in changing * the s_rock flag. Once we have the final s_rock value, * we then decide whether to use the Joliet descriptor. 
*/ inode = isofs_iget(s, sbi->s_firstdatazone, 0); if (IS_ERR(inode)) goto out_no_root; /* * Fix for broken CDs with Rock Ridge and empty ISO root directory but * correct Joliet root directory. */ if (sbi->s_rock == 1 && joliet_level && rootdir_empty(s, sbi->s_firstdatazone)) { printk(KERN_NOTICE "ISOFS: primary root directory is empty. " "Disabling Rock Ridge and switching to Joliet."); sbi->s_rock = 0; } /* * If this disk has both Rock Ridge and Joliet on it, then we * want to use Rock Ridge by default. This can be overridden * by using the norock mount option. There is still one other * possibility that is not taken into account: a Rock Ridge * CD with Unicode names. Until someone sees such a beast, it * will not be supported. */ if (sbi->s_rock == 1) { joliet_level = 0; } else if (joliet_level) { sbi->s_rock = 0; if (sbi->s_firstdatazone != first_data_zone) { sbi->s_firstdatazone = first_data_zone; printk(KERN_DEBUG "ISOFS: changing to secondary root\n"); iput(inode); inode = isofs_iget(s, sbi->s_firstdatazone, 0); if (IS_ERR(inode)) goto out_no_root; } } if (opt.check == 'u') { /* Only Joliet is case insensitive by default */ if (joliet_level) opt.check = 'r'; else opt.check = 's'; } sbi->s_joliet_level = joliet_level; /* Make sure the root inode is a directory */ if (!S_ISDIR(inode->i_mode)) { printk(KERN_WARNING "isofs_fill_super: root inode is not a directory. " "Corrupted media?\n"); goto out_iput; } table = 0; if (joliet_level) table += 2; if (opt.check == 'r') table++; s->s_d_op = &isofs_dentry_ops[table]; /* get the root dentry */ s->s_root = d_make_root(inode); if (!(s->s_root)) { error = -ENOMEM; goto out_no_inode; } kfree(opt.iocharset); return 0; /* * Display error messages and free resources. */ out_iput: iput(inode); goto out_no_inode; out_no_root: error = PTR_ERR(inode); if (error != -ENOMEM) printk(KERN_WARNING "%s: get root inode failed\n", __func__); out_no_inode: #ifdef CONFIG_JOLIET unload_nls(sbi->s_nls_iocharset); #endif goto out_freesbi; out_no_read: printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n", __func__, s->s_id, iso_blknum, block); goto out_freebh; out_bad_zone_size: printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n", sbi->s_log_zone_size); goto out_freebh; out_bad_size: printk(KERN_WARNING "ISOFS: Logical zone size(%d) < hardware blocksize(%u)\n", orig_zonesize, opt.blocksize); goto out_freebh; out_unknown_format: if (!silent) printk(KERN_WARNING "ISOFS: Unable to identify CD-ROM format.\n"); out_freebh: brelse(bh); brelse(pri_bh); out_freesbi: kfree(opt.iocharset); kfree(sbi); s->s_fs_info = NULL; return error; } static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = ISOFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = (ISOFS_SB(sb)->s_nzones << (ISOFS_SB(sb)->s_log_zone_size - sb->s_blocksize_bits)); buf->f_bfree = 0; buf->f_bavail = 0; buf->f_files = ISOFS_SB(sb)->s_ninodes; buf->f_ffree = 0; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_namelen = NAME_MAX; return 0; } /* * Get a set of blocks; filling in buffer_heads if already allocated * or getblk() if they are not. Returns the number of blocks inserted * (-ve == error.) 
*/ int isofs_get_blocks(struct inode *inode, sector_t iblock, struct buffer_head **bh, unsigned long nblocks) { unsigned long b_off = iblock; unsigned offset, sect_size; unsigned int firstext; unsigned long nextblk, nextoff; int section, rv, error; struct iso_inode_info *ei = ISOFS_I(inode); error = -EIO; rv = 0; if (iblock != b_off) { printk(KERN_DEBUG "%s: block number too large\n", __func__); goto abort; } offset = 0; firstext = ei->i_first_extent; sect_size = ei->i_section_size >> ISOFS_BUFFER_BITS(inode); nextblk = ei->i_next_section_block; nextoff = ei->i_next_section_offset; section = 0; while (nblocks) { /* If we are *way* beyond the end of the file, print a message. * Access beyond the end of the file up to the next page boundary * is normal, however because of the way the page cache works. * In this case, we just return 0 so that we can properly fill * the page with useless information without generating any * I/O errors. */ if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n", __func__, b_off, (unsigned long long)inode->i_size); goto abort; } /* On the last section, nextblk == 0, section size is likely to * exceed sect_size by a partial block, and access beyond the * end of the file will reach beyond the section size, too. */ while (nextblk && (b_off >= (offset + sect_size))) { struct inode *ninode; offset += sect_size; ninode = isofs_iget(inode->i_sb, nextblk, nextoff); if (IS_ERR(ninode)) { error = PTR_ERR(ninode); goto abort; } firstext = ISOFS_I(ninode)->i_first_extent; sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode); nextblk = ISOFS_I(ninode)->i_next_section_block; nextoff = ISOFS_I(ninode)->i_next_section_offset; iput(ninode); if (++section > 100) { printk(KERN_DEBUG "%s: More than 100 file sections ?!?" " aborting...\n", __func__); printk(KERN_DEBUG "%s: block=%lu firstext=%u sect_size=%u " "nextblk=%lu nextoff=%lu\n", __func__, b_off, firstext, (unsigned) sect_size, nextblk, nextoff); goto abort; } } if (*bh) { map_bh(*bh, inode->i_sb, firstext + b_off - offset); } else { *bh = sb_getblk(inode->i_sb, firstext+b_off-offset); if (!*bh) goto abort; } bh++; /* Next buffer head */ b_off++; /* Next buffer offset */ nblocks--; rv++; } error = 0; abort: return rv != 0 ? rv : error; } /* * Used by the standard interfaces. */ static int isofs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int ret; if (create) { printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__); return -EROFS; } ret = isofs_get_blocks(inode, iblock, &bh_result, 1); return ret < 0 ? 
ret : 0; } static int isofs_bmap(struct inode *inode, sector_t block) { struct buffer_head dummy; int error; dummy.b_state = 0; dummy.b_blocknr = -1000; error = isofs_get_block(inode, block, &dummy, 0); if (!error) return dummy.b_blocknr; return 0; } struct buffer_head *isofs_bread(struct inode *inode, sector_t block) { sector_t blknr = isofs_bmap(inode, block); if (!blknr) return NULL; return sb_bread(inode->i_sb, blknr); } static int isofs_readpage(struct file *file, struct page *page) { return mpage_readpage(page, isofs_get_block); } static int isofs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, isofs_get_block); } static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,isofs_get_block); } static const struct address_space_operations isofs_aops = { .readpage = isofs_readpage, .readpages = isofs_readpages, .bmap = _isofs_bmap }; static int isofs_read_level3_size(struct inode *inode) { unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); int high_sierra = ISOFS_SB(inode->i_sb)->s_high_sierra; struct buffer_head *bh = NULL; unsigned long block, offset, block_saved, offset_saved; int i = 0; int more_entries = 0; struct iso_directory_record *tmpde = NULL; struct iso_inode_info *ei = ISOFS_I(inode); inode->i_size = 0; /* The first 16 blocks are reserved as the System Area. Thus, * no inodes can appear in block 0. We use this to flag that * this is the last section. */ ei->i_next_section_block = 0; ei->i_next_section_offset = 0; block = ei->i_iget5_block; offset = ei->i_iget5_offset; do { struct iso_directory_record *de; unsigned int de_len; if (!bh) { bh = sb_bread(inode->i_sb, block); if (!bh) goto out_noread; } de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; if (de_len == 0) { brelse(bh); bh = NULL; ++block; offset = 0; continue; } block_saved = block; offset_saved = offset; offset += de_len; /* Make sure we have a full directory entry */ if (offset >= bufsize) { int slop = bufsize - offset + de_len; if (!tmpde) { tmpde = kmalloc(256, GFP_KERNEL); if (!tmpde) goto out_nomem; } memcpy(tmpde, de, slop); offset &= bufsize - 1; block++; brelse(bh); bh = NULL; if (offset) { bh = sb_bread(inode->i_sb, block); if (!bh) goto out_noread; memcpy((void *)tmpde+slop, bh->b_data, offset); } de = tmpde; } inode->i_size += isonum_733(de->size); if (i == 1) { ei->i_next_section_block = block_saved; ei->i_next_section_offset = offset_saved; } more_entries = de->flags[-high_sierra] & 0x80; i++; if (i > 100) goto out_toomany; } while (more_entries); out: kfree(tmpde); if (bh) brelse(bh); return 0; out_nomem: if (bh) brelse(bh); return -ENOMEM; out_noread: printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block); kfree(tmpde); return -EIO; out_toomany: printk(KERN_INFO "%s: More than 100 file sections ?!?, aborting...\n" "isofs_read_level3_size: inode=%lu\n", __func__, inode->i_ino); goto out; } static int isofs_read_inode(struct inode *inode, int relocated) { struct super_block *sb = inode->i_sb; struct isofs_sb_info *sbi = ISOFS_SB(sb); unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); unsigned long block; int high_sierra = sbi->s_high_sierra; struct buffer_head *bh = NULL; struct iso_directory_record *de; struct iso_directory_record *tmpde = NULL; unsigned int de_len; unsigned long offset; struct iso_inode_info *ei = ISOFS_I(inode); int ret = -EIO; block = ei->i_iget5_block; bh = 
sb_bread(inode->i_sb, block); if (!bh) goto out_badread; offset = ei->i_iget5_offset; de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; if (offset + de_len > bufsize) { int frag1 = bufsize - offset; tmpde = kmalloc(de_len, GFP_KERNEL); if (tmpde == NULL) { printk(KERN_INFO "%s: out of memory\n", __func__); ret = -ENOMEM; goto fail; } memcpy(tmpde, bh->b_data + offset, frag1); brelse(bh); bh = sb_bread(inode->i_sb, ++block); if (!bh) goto out_badread; memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1); de = tmpde; } inode->i_ino = isofs_get_ino(ei->i_iget5_block, ei->i_iget5_offset, ISOFS_BUFFER_BITS(inode)); /* Assume it is a normal-format file unless told otherwise */ ei->i_file_format = isofs_file_normal; if (de->flags[-high_sierra] & 2) { if (sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; else inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; set_nlink(inode, 1); /* * Set to 1. We know there are 2, but * the find utility tries to optimize * if it is 2, and it screws up. It is * easier to give 1 which tells find to * do it the hard way. */ } else { if (sbi->s_fmode != ISOFS_INVALID_MODE) { inode->i_mode = S_IFREG | sbi->s_fmode; } else { /* * Set default permissions: r-x for all. The disc * could be shared with DOS machines so virtually * anything could be a valid executable. */ inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO; } set_nlink(inode, 1); } inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; inode->i_blocks = 0; ei->i_format_parm[0] = 0; ei->i_format_parm[1] = 0; ei->i_format_parm[2] = 0; ei->i_section_size = isonum_733(de->size); if (de->flags[-high_sierra] & 0x80) { ret = isofs_read_level3_size(inode); if (ret < 0) goto fail; ret = -EIO; } else { ei->i_next_section_block = 0; ei->i_next_section_offset = 0; inode->i_size = isonum_733(de->size); } /* * Some dipshit decided to store some other bit of information * in the high byte of the file length. Truncate size in case * this CDROM was mounted with the cruft option. */ if (sbi->s_cruft) inode->i_size &= 0x00ffffff; if (de->interleave[0]) { printk(KERN_DEBUG "ISOFS: Interleaved files not (yet) supported.\n"); inode->i_size = 0; } /* I have no idea what file_unit_size is used for, so we will flag it for now */ if (de->file_unit_size[0] != 0) { printk(KERN_DEBUG "ISOFS: File unit size != 0 for ISO file (%ld).\n", inode->i_ino); } /* I have no idea what other flag bits are used for, so we will flag it for now */ #ifdef DEBUG if((de->flags[-high_sierra] & ~2)!= 0){ printk(KERN_DEBUG "ISOFS: Unusual flag settings for ISO file " "(%ld %x).\n", inode->i_ino, de->flags[-high_sierra]); } #endif inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = iso_date(de->date, high_sierra); inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0; ei->i_first_extent = (isonum_733(de->extent) + isonum_711(de->ext_attr_length)); /* Set the number of blocks for stat() - should be done before RR */ inode->i_blocks = (inode->i_size + 511) >> 9; /* * Now test for possible Rock Ridge extensions which will override * some of these numbers in the inode structure. 
*/ if (!high_sierra) { parse_rock_ridge_inode(de, inode, relocated); /* if we want uid/gid set, override the rock ridge setting */ if (sbi->s_uid_set) inode->i_uid = sbi->s_uid; if (sbi->s_gid_set) inode->i_gid = sbi->s_gid; } /* Now set final access rights if overriding rock ridge setting */ if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm && sbi->s_dmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFDIR | sbi->s_dmode; if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm && sbi->s_fmode != ISOFS_INVALID_MODE) inode->i_mode = S_IFREG | sbi->s_fmode; /* Install the inode operations vector */ if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; switch (ei->i_file_format) { #ifdef CONFIG_ZISOFS case isofs_file_compressed: inode->i_data.a_ops = &zisofs_aops; break; #endif default: inode->i_data.a_ops = &isofs_aops; break; } } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &isofs_dir_inode_operations; inode->i_fop = &isofs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &page_symlink_inode_operations; inode->i_data.a_ops = &isofs_symlink_aops; } else /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ init_special_inode(inode, inode->i_mode, inode->i_rdev); ret = 0; out: kfree(tmpde); if (bh) brelse(bh); return ret; out_badread: printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); fail: goto out; } struct isofs_iget5_callback_data { unsigned long block; unsigned long offset; }; static int isofs_iget5_test(struct inode *ino, void *data) { struct iso_inode_info *i = ISOFS_I(ino); struct isofs_iget5_callback_data *d = (struct isofs_iget5_callback_data*)data; return (i->i_iget5_block == d->block) && (i->i_iget5_offset == d->offset); } static int isofs_iget5_set(struct inode *ino, void *data) { struct iso_inode_info *i = ISOFS_I(ino); struct isofs_iget5_callback_data *d = (struct isofs_iget5_callback_data*)data; i->i_iget5_block = d->block; i->i_iget5_offset = d->offset; return 0; } /* Store, in the inode's containing structure, the block and block * offset that point to the underlying meta-data for the inode. 
The * code below is otherwise similar to the iget() code in * include/linux/fs.h */ struct inode *__isofs_iget(struct super_block *sb, unsigned long block, unsigned long offset, int relocated) { unsigned long hashval; struct inode *inode; struct isofs_iget5_callback_data data; long ret; if (offset >= 1ul << sb->s_blocksize_bits) return ERR_PTR(-EINVAL); data.block = block; data.offset = offset; hashval = (block << sb->s_blocksize_bits) | offset; inode = iget5_locked(sb, hashval, &isofs_iget5_test, &isofs_iget5_set, &data); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { ret = isofs_read_inode(inode, relocated); if (ret < 0) { iget_failed(inode); inode = ERR_PTR(ret); } else { unlock_new_inode(inode); } } return inode; } static struct dentry *isofs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { /* We don't support read-write mounts */ if (!(flags & MS_RDONLY)) return ERR_PTR(-EACCES); return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); } static struct file_system_type iso9660_fs_type = { .owner = THIS_MODULE, .name = "iso9660", .mount = isofs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("iso9660"); MODULE_ALIAS("iso9660"); static int __init init_iso9660_fs(void) { int err = init_inodecache(); if (err) goto out; #ifdef CONFIG_ZISOFS err = zisofs_init(); if (err) goto out1; #endif err = register_filesystem(&iso9660_fs_type); if (err) goto out2; return 0; out2: #ifdef CONFIG_ZISOFS zisofs_cleanup(); out1: #endif destroy_inodecache(); out: return err; } static void __exit exit_iso9660_fs(void) { unregister_filesystem(&iso9660_fs_type); #ifdef CONFIG_ZISOFS zisofs_cleanup(); #endif destroy_inodecache(); } module_init(init_iso9660_fs) module_exit(exit_iso9660_fs) MODULE_LICENSE("GPL");
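/*
 * Minimal standalone userspace sketch (not part of the kernel sources
 * above) of the length-prefixed walk that rootdir_empty() and
 * isofs_read_level3_size() perform on ISO 9660 directory records: each
 * record begins with a one-byte record length, and a length of zero marks
 * the end of useful data in the block.  The 2048-byte logical block size,
 * the fabricated 34-byte records, and the count_records() helper are
 * assumptions made purely for illustration; they are not taken from the
 * isofs code above.
 */
#include <stddef.h>
#include <stdio.h>

#define ISO_BLOCK_SIZE 2048

/* Count directory records in one logical block, stopping at a zero length
 * prefix or at a record that would overrun the block. */
static int count_records(const unsigned char *block, size_t len)
{
	size_t offset = 0;
	int records = 0;

	while (offset < len) {
		unsigned int de_len = block[offset];	/* record length prefix */

		if (de_len == 0)			/* padding: no more records */
			break;
		if (offset + de_len > len)		/* record would overrun block */
			break;
		records++;
		offset += de_len;
	}
	return records;
}

int main(void)
{
	unsigned char block[ISO_BLOCK_SIZE] = { 0 };

	/* Fabricate two 34-byte records followed by zero padding. */
	block[0] = 34;
	block[34] = 34;

	printf("records: %d\n", count_records(block, sizeof(block)));
	return 0;
}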
./CrossVul/dataset_final_sorted/CWE-399/c/good_2262_0
crossvul-cpp_data_bad_2295_1
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.103 2014/05/02 02:25:10 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int, int); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? 
sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x01 #define FLAGS_DID_NOTE 0x02 #define FLAGS_DID_BUILD_ID 0x04 #define FLAGS_DID_CORE_STYLE 0x08 #define FLAGS_IS_CORE 0x10 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; if (xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. */ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; #ifdef ELFCORE int os_style = -1; #endif uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return offset; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return offset; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. 
*/ return (offset >= size) ? offset : size; } if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) == (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) goto core; if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && xnh_type == NT_GNU_VERSION && descsz == 2) { file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); if (file_printf(ms, ", for GNU/") == -1) return size; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return size; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return size; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return size; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return size; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return size; break; default: if (file_printf(ms, "<unknown>") == -1) return size; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? "md5/uuid" : "sha1") == -1) return size; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return size; *flags |= FLAGS_DID_BUILD_ID; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && xnh_type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return size; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? 
"," : "", pax[i]) == -1) return size; } } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: if (descsz == 4) { do_note_netbsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } break; case NT_NETBSD_MARCH: if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) { do_note_freebsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && xnh_type == NT_OPENBSD_VERSION && descsz == 4) { if (file_printf(ms, ", for OpenBSD") == -1) return size; /* Content of note is always 0 */ *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; if (file_printf(ms, ", for DragonFly") == -1) return size; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } core: /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } #ifdef ELFCORE if ((*flags & FLAGS_DID_CORE) != 0) return size; if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return size; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (xnh_type == NT_NETBSD_CORE_PROCINFO) { uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", &nbuf[doff + 0x7c]) == -1) return size; /* * Extract the signal number. It is at * offset 0x08. */ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return size; *flags |= FLAGS_DID_CORE; return size; } break; default: if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. 
*/ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS ; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return size; *flags |= FLAGS_DID_CORE; return size; tryanother: ; } } break; } #endif return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int mach, int strtab) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. 
*/ if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) { file_badread(ms); return -1; } name[sizeof(name) - 1] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? 
", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int sh_num) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *shared_libraries = ""; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_INTERP: shared_libraries = " (uses shared libs)"; break; default: if (xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_NOTE: if ((align = xph_align) & 0x80000000UL) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } if (sh_num) break; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked%s", linking_style, shared_libraries) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } fsize = st.st_size; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2295_1
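/*
 * Minimal standalone sketch (separate from the file(1) readelf.c sources
 * above) of the note-walking arithmetic that donote() relies on: an ELF
 * note is a header of three 32-bit words (namesz, descsz, type) followed
 * by the name and the descriptor, each padded to the caller-supplied
 * alignment (4 here).  NOTE_ALIGN() mirrors the ELF_ALIGN() macro above;
 * the walk_notes() helper, the fabricated buffer, and the little-endian
 * byte order of that buffer are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NOTE_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

struct note_hdr {
	uint32_t namesz;
	uint32_t descsz;
	uint32_t type;
};

/* Walk the notes in buf, printing name and type, with bounds checks. */
static void walk_notes(const unsigned char *buf, size_t size, size_t align)
{
	size_t offset = 0;

	while (offset + sizeof(struct note_hdr) <= size) {
		struct note_hdr nh;
		size_t noff, doff, next;

		memcpy(&nh, buf + offset, sizeof(nh));
		noff = offset + sizeof(nh);
		doff = NOTE_ALIGN(noff + nh.namesz, align);
		next = NOTE_ALIGN(doff + nh.descsz, align);

		if (nh.namesz == 0 && nh.descsz == 0)
			break;			/* empty header: stop */
		if (doff > size || next > size || next <= offset)
			break;			/* truncated or overflowing note */

		printf("note '%.*s' type %u descsz %u\n",
		    (int)nh.namesz, (const char *)(buf + noff),
		    nh.type, nh.descsz);
		offset = next;
	}
}

int main(void)
{
	/* One fabricated note: name "GNU\0", 4-byte descriptor, type 1.
	 * Little-endian byte order assumed for the fabricated header words. */
	unsigned char buf[] = {
		4, 0, 0, 0,	/* namesz */
		4, 0, 0, 0,	/* descsz */
		1, 0, 0, 0,	/* type   */
		'G', 'N', 'U', 0,
		0xde, 0xad, 0xbe, 0xef,
	};

	walk_notes(buf, sizeof(buf), 4);
	return 0;
}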
crossvul-cpp_data_bad_1410_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP CCCC DDDD % % P P C D D % % PPPP C D D % % P C D D % % P CCCC DDDD % % % % % % Read/Write Photo CD Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" #include "MagickCore/utility.h" /* Forward declarations. */ static MagickBooleanType WritePCDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e c o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DecodeImage recovers the Huffman encoded luminance and chrominance % deltas. % % The format of the DecodeImage method is: % % MagickBooleanType DecodeImage(Image *image,unsigned char *luma, % unsigned char *chroma1,unsigned char *chroma2) % % A description of each parameter follows: % % o image: the address of a structure of type Image. % % o luma: the address of a character buffer that contains the % luminance information. % % o chroma1: the address of a character buffer that contains the % chrominance information. % % o chroma2: the address of a character buffer that contains the % chrominance information. 
% */ static MagickBooleanType DecodeImage(Image *image,unsigned char *luma, unsigned char *chroma1,unsigned char *chroma2,ExceptionInfo *exception) { #define IsSync(sum) ((sum & 0xffffff00UL) == 0xfffffe00UL) #define PCDGetBits(n) \ { \ sum=(sum << n) & 0xffffffff; \ bits-=n; \ while (bits <= 24) \ { \ if (p >= (buffer+0x800)) \ { \ count=ReadBlob(image,0x800,buffer); \ p=buffer; \ } \ sum|=((unsigned int) (*p) << (24-bits)); \ bits+=8; \ p++; \ } \ } typedef struct PCDTable { unsigned int length, sequence; MagickStatusType mask; unsigned char key; } PCDTable; PCDTable *pcd_table[3]; register ssize_t i, j; register PCDTable *r; register unsigned char *p, *q; size_t bits, length, plane, pcd_length[3], row, sum; ssize_t count, quantum; unsigned char *buffer; /* Initialize Huffman tables. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(luma != (unsigned char *) NULL); assert(chroma1 != (unsigned char *) NULL); assert(chroma2 != (unsigned char *) NULL); buffer=(unsigned char *) AcquireQuantumMemory(0x800,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); sum=0; bits=32; p=buffer+0x800; for (i=0; i < 3; i++) { pcd_table[i]=(PCDTable *) NULL; pcd_length[i]=0; } for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) { PCDGetBits(8); length=(sum & 0xff)+1; pcd_table[i]=(PCDTable *) AcquireQuantumMemory(length, sizeof(*pcd_table[i])); if (pcd_table[i] == (PCDTable *) NULL) { buffer=(unsigned char *) RelinquishMagickMemory(buffer); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } r=pcd_table[i]; for (j=0; j < (ssize_t) length; j++) { PCDGetBits(8); r->length=(unsigned int) (sum & 0xff)+1; if (r->length > 16) { buffer=(unsigned char *) RelinquishMagickMemory(buffer); return(MagickFalse); } PCDGetBits(16); r->sequence=(unsigned int) (sum & 0xffff) << 16; PCDGetBits(8); r->key=(unsigned char) (sum & 0xff); r->mask=(~((1U << (32-r->length))-1)); r++; } pcd_length[i]=(size_t) length; } /* Search for Sync byte. */ for (i=0; i < 1; i++) PCDGetBits(16); for (i=0; i < 1; i++) PCDGetBits(16); while ((sum & 0x00fff000UL) != 0x00fff000UL) PCDGetBits(8); while (IsSync(sum) == 0) PCDGetBits(1); /* Recover the Huffman encoded luminance and chrominance deltas. */ count=0; length=0; plane=0; row=0; q=luma; for ( ; ; ) { if (IsSync(sum) != 0) { /* Determine plane and row number. */ PCDGetBits(16); row=((sum >> 9) & 0x1fff); if (row == image->rows) break; PCDGetBits(8); plane=sum >> 30; PCDGetBits(16); switch (plane) { case 0: { q=luma+row*image->columns; count=(ssize_t) image->columns; break; } case 2: { q=chroma1+(row >> 1)*image->columns; count=(ssize_t) (image->columns >> 1); plane--; break; } case 3: { q=chroma2+(row >> 1)*image->columns; count=(ssize_t) (image->columns >> 1); plane--; break; } default: { for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) pcd_table[i]=(PCDTable *) RelinquishMagickMemory(pcd_table[i]); buffer=(unsigned char *) RelinquishMagickMemory(buffer); ThrowBinaryException(CorruptImageError,"CorruptImage", image->filename); } } length=pcd_length[plane]; continue; } /* Decode luminance or chrominance deltas. 
*/ r=pcd_table[plane]; for (i=0; ((i < (ssize_t) length) && ((sum & r->mask) != r->sequence)); i++) r++; if ((row > image->rows) || (r == (PCDTable *) NULL)) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); while ((sum & 0x00fff000) != 0x00fff000) PCDGetBits(8); while (IsSync(sum) == 0) PCDGetBits(1); continue; } if (r->key < 128) quantum=(ssize_t) (*q)+r->key; else quantum=(ssize_t) (*q)+r->key-256; *q=(unsigned char) ((quantum < 0) ? 0 : (quantum > 255) ? 255 : quantum); q++; PCDGetBits(r->length); count--; } /* Relinquish resources. */ for (i=0; i < (image->columns > 1536 ? 3 : 1); i++) pcd_table[i]=(PCDTable *) RelinquishMagickMemory(pcd_table[i]); buffer=(unsigned char *) RelinquishMagickMemory(buffer); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P C D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPCD() returns MagickTrue if the image format type, identified by the % magick string, is PCD. % % The format of the IsPCD method is: % % MagickBooleanType IsPCD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPCD(const unsigned char *magick,const size_t length) { if (length < 2052) return(MagickFalse); if (LocaleNCompare((const char *) magick+2048,"PCD_",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPCDImage() reads a Photo CD image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. Much of the PCD decoder was derived from % the program hpcdtoppm(1) by Hadmut Danisch. % % The format of the ReadPCDImage method is: % % image=ReadPCDImage(image_info) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *OverviewImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { Image *montage_image; MontageInfo *montage_info; register Image *p; /* Create the PCD Overview image. */ for (p=image; p != (Image *) NULL; p=p->next) { (void) DeleteImageProperty(p,"label"); (void) SetImageProperty(p,"label",DefaultTileLabel,exception); } montage_info=CloneMontageInfo(image_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image_info->filename, MagickPathExtent); montage_image=MontageImageList(image_info,montage_info,image,exception); montage_info=DestroyMontageInfo(montage_info); if (montage_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); image=DestroyImageList(image); return(montage_image); } static void Upsample(const size_t width,const size_t height, const size_t scaled_width,unsigned char *pixels) { register ssize_t x, y; register unsigned char *p, *q, *r; /* Create a new image that is a integral size greater than an existing one. 
*/ assert(pixels != (unsigned char *) NULL); for (y=0; y < (ssize_t) height; y++) { p=pixels+(height-1-y)*scaled_width+(width-1); q=pixels+((height-1-y) << 1)*scaled_width+((width-1) << 1); *q=(*p); *(q+1)=(*(p)); for (x=1; x < (ssize_t) width; x++) { p--; q-=2; *q=(*p); *(q+1)=(unsigned char) ((((size_t) *p)+((size_t) *(p+1))+1) >> 1); } } for (y=0; y < (ssize_t) (height-1); y++) { p=pixels+((size_t) y << 1)*scaled_width; q=p+scaled_width; r=q+scaled_width; for (x=0; x < (ssize_t) (width-1); x++) { *q=(unsigned char) ((((size_t) *p)+((size_t) *r)+1) >> 1); *(q+1)=(unsigned char) ((((size_t) *p)+((size_t) *(p+2))+ ((size_t) *r)+((size_t) *(r+2))+2) >> 2); q+=2; p+=2; r+=2; } *q++=(unsigned char) ((((size_t) *p++)+((size_t) *r++)+1) >> 1); *q++=(unsigned char) ((((size_t) *p++)+((size_t) *r++)+1) >> 1); } p=pixels+(2*height-2)*scaled_width; q=pixels+(2*height-1)*scaled_width; (void) memcpy(q,p,(size_t) (2*width)); } static Image *ReadPCDImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define ThrowPCDException(exception,message) \ { \ if (header != (unsigned char *) NULL) \ header=(unsigned char *) RelinquishMagickMemory(header); \ if (luma != (unsigned char *) NULL) \ luma=(unsigned char *) RelinquishMagickMemory(luma); \ if (chroma2 != (unsigned char *) NULL) \ chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); \ if (chroma1 != (unsigned char *) NULL) \ chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); \ ThrowReaderException((exception),(message)); \ } Image *image; MagickBooleanType status; MagickOffsetType offset; MagickSizeType number_pixels; register ssize_t i, y; register Quantum *q; register unsigned char *c1, *c2, *yy; size_t height, number_images, rotate, scene, width; ssize_t count, x; unsigned char *chroma1, *chroma2, *header, *luma; unsigned int overview; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Determine if this a PCD file. */ header=(unsigned char *) AcquireQuantumMemory(0x800,3UL*sizeof(*header)); if (header == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); chroma1=(unsigned char *) NULL; chroma2=(unsigned char *) NULL; luma=(unsigned char *) NULL; count=ReadBlob(image,3*0x800,header); if (count != (3*0x800)) ThrowPCDException(CorruptImageError,"ImproperImageHeader"); overview=LocaleNCompare((char *) header,"PCD_OPA",7) == 0; if ((LocaleNCompare((char *) header+0x800,"PCD",3) != 0) && (overview == 0)) ThrowPCDException(CorruptImageError,"ImproperImageHeader"); rotate=header[0x0e02] & 0x03; number_images=((header[10] << 8) | header[11]) & 0xffff; header=(unsigned char *) RelinquishMagickMemory(header); if ((overview != 0) && (AcquireMagickResource(ListLengthResource,number_images) == MagickFalse)) ThrowPCDException(ResourceLimitError,"ListLengthExceedsLimit"); /* Determine resolution by scene specification. 
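      A Photo CD stores the picture at several resolutions; scene n selects a
      base tile of 192x128 doubled (n-1) times (scene 1 = 192x128,
      scene 3 = 768x512 Base, scene 5 = 3072x2048 16Base).  When no geometry
      is requested the Base image (scene 3) is read.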
*/ if ((image->columns == 0) || (image->rows == 0)) scene=3; else { width=192; height=128; for (scene=1; scene < 6; scene++) { if ((width >= image->columns) && (height >= image->rows)) break; width<<=1; height<<=1; } } if (image_info->number_scenes != 0) scene=(size_t) MagickMin(image_info->scene,6); if (overview != 0) scene=1; /* Initialize image structure. */ width=192; height=128; for (i=1; i < (ssize_t) MagickMin(scene,3); i++) { width<<=1; height<<=1; } image->columns=width; image->rows=height; image->depth=8; for ( ; i < (ssize_t) scene; i++) { image->columns<<=1; image->rows<<=1; } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate luma and chroma memory. */ number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != (size_t) number_pixels) ThrowPCDException(ResourceLimitError,"MemoryAllocationFailed"); chroma1=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*chroma1)); chroma2=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*chroma2)); luma=(unsigned char *) AcquireQuantumMemory(image->columns+1UL,image->rows* 10*sizeof(*luma)); if ((chroma1 == (unsigned char *) NULL) || (chroma2 == (unsigned char *) NULL) || (luma == (unsigned char *) NULL)) ThrowPCDException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(chroma1,0,(image->columns+1UL)*image->rows* 10*sizeof(*chroma1)); (void) memset(chroma2,0,(image->columns+1UL)*image->rows* 10*sizeof(*chroma2)); (void) memset(luma,0,(image->columns+1UL)*image->rows* 10*sizeof(*luma)); /* Advance to image data. */ offset=93; if (overview != 0) offset=2; else if (scene == 2) offset=20; else if (scene <= 1) offset=1; for (i=0; i < (ssize_t) (offset*0x800); i++) if (ReadBlobByte(image) == EOF) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); if (overview != 0) { MagickProgressMonitor progress_monitor; register ssize_t j; /* Read thumbnails from overview image. */ for (j=1; j <= (ssize_t) number_images; j++) { progress_monitor=SetImageProgressMonitor(image, (MagickProgressMonitor) NULL,image->client_data); (void) FormatLocaleString(image->filename,MagickPathExtent, "images/img%04ld.pcd",(long) j); (void) FormatLocaleString(image->magick_filename,MagickPathExtent, "images/img%04ld.pcd",(long) j); image->scene=(size_t) j; image->columns=width; image->rows=height; image->depth=8; yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) height; y+=2) { count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width >> 1,c1); c1+=image->columns; count=ReadBlob(image,width >> 1,c2); c2+=image->columns; if (EOFBlob(image) != MagickFalse) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); } Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma1); Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma2); /* Transfer luminance and chrominance channels. 
*/ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*yy++),q); SetPixelGreen(image,ScaleCharToQuantum(*c1++),q); SetPixelBlue(image,ScaleCharToQuantum(*c2++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } image->colorspace=YCCColorspace; if (LocaleCompare(image_info->magick,"PCDS") == 0) (void) SetImageColorspace(image,sRGBColorspace,exception); if (EOFBlob(image) != MagickFalse) break; if (j < (ssize_t) number_images) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); } (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,j-1,number_images); if (status == MagickFalse) break; } } chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); luma=(unsigned char *) RelinquishMagickMemory(luma); if (status == MagickFalse) return(DestroyImageList(image)); return(OverviewImage(image_info,GetFirstImageInList(image),exception)); } /* Read interleaved image. */ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) height; y+=2) { count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width,yy); yy+=image->columns; count=ReadBlob(image,width >> 1,c1); c1+=image->columns; count=ReadBlob(image,width >> 1,c2); c2+=image->columns; if (EOFBlob(image) != MagickFalse) ThrowPCDException(CorruptImageError,"UnexpectedEndOfFile"); } if (scene >= 4) { /* Recover luminance deltas for 1536x1024 image. */ Upsample(768,512,image->columns,luma); Upsample(384,256,image->columns,chroma1); Upsample(384,256,image->columns,chroma2); image->rows=1024; for (i=0; i < (4*0x800); i++) (void) ReadBlobByte(image); status=DecodeImage(image,luma,chroma1,chroma2,exception); if ((scene >= 5) && status) { /* Recover luminance deltas for 3072x2048 image. */ Upsample(1536,1024,image->columns,luma); Upsample(768,512,image->columns,chroma1); Upsample(768,512,image->columns,chroma2); image->rows=2048; offset=TellBlob(image)/0x800+12; offset=SeekBlob(image,offset*0x800,SEEK_SET); status=DecodeImage(image,luma,chroma1,chroma2,exception); if ((scene >= 6) && (status != MagickFalse)) { /* Recover luminance deltas for 6144x4096 image (vaporware). */ Upsample(3072,2048,image->columns,luma); Upsample(1536,1024,image->columns,chroma1); Upsample(1536,1024,image->columns,chroma2); image->rows=4096; } } } Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma1); Upsample(image->columns >> 1,image->rows >> 1,image->columns,chroma2); /* Transfer luminance and chrominance channels. 
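      The decoded Y, C1 and C2 planes are copied row by row into the red,
      green and blue channels of the pixel cache; the image is then tagged
      with the YCC colorspace (or converted to sRGB for the PCDS variant).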
*/ yy=luma; c1=chroma1; c2=chroma2; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*yy++),q); SetPixelGreen(image,ScaleCharToQuantum(*c1++),q); SetPixelBlue(image,ScaleCharToQuantum(*c2++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } chroma2=(unsigned char *) RelinquishMagickMemory(chroma2); chroma1=(unsigned char *) RelinquishMagickMemory(chroma1); luma=(unsigned char *) RelinquishMagickMemory(luma); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); if (image_info->ping == MagickFalse) if ((rotate == 1) || (rotate == 3)) { double degrees; Image *rotate_image; /* Rotate image. */ degrees=rotate == 1 ? -90.0 : 90.0; rotate_image=RotateImage(image,degrees,exception); if (rotate_image != (Image *) NULL) { image=DestroyImage(image); image=rotate_image; } } /* Set CCIR 709 primaries with a D65 white point. */ image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->gamma=1.000f/2.200f; image->colorspace=YCCColorspace; if (LocaleCompare(image_info->magick,"PCDS") == 0) (void) SetImageColorspace(image,sRGBColorspace,exception); if (image_info->scene != 0) for (i=0; i < (ssize_t) image_info->scene; i++) AppendImageToList(&image,CloneImage(image,0,0,MagickTrue,exception)); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPCDImage() adds attributes for the PCD image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPCDImage method is: % % size_t RegisterPCDImage(void) % */ ModuleExport size_t RegisterPCDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PCD","PCD","Photo CD"); entry->decoder=(DecodeImageHandler *) ReadPCDImage; entry->encoder=(EncodeImageHandler *) WritePCDImage; entry->magick=(IsImageFormatHandler *) IsPCD; entry->flags^=CoderAdjoinFlag; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PCD","PCDS","Photo CD"); entry->decoder=(DecodeImageHandler *) ReadPCDImage; entry->encoder=(EncodeImageHandler *) WritePCDImage; entry->flags^=CoderAdjoinFlag; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPCDImage() removes format registrations made by the % PCD module from the list of supported formats. % % The format of the UnregisterPCDImage method is: % % UnregisterPCDImage(void) % */ ModuleExport void UnregisterPCDImage(void) { (void) UnregisterMagickInfo("PCD"); (void) UnregisterMagickInfo("PCDS"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P C D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePCDImage() writes an image in the Photo CD encoded image format. % % The format of the WritePCDImage method is: % % MagickBooleanType WritePCDImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePCDTile(Image *image,const char *page_geometry, const size_t tile_columns,const size_t tile_rows,ExceptionInfo *exception) { GeometryInfo geometry_info; Image *downsample_image, *tile_image; MagickBooleanType status; MagickStatusType flags; RectangleInfo geometry; register const Quantum *p, *q; register ssize_t i, x; ssize_t y; /* Scale image to tile size. */ SetGeometry(image,&geometry); (void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); if ((geometry.width % 2) != 0) geometry.width--; if ((geometry.height % 2) != 0) geometry.height--; tile_image=ResizeImage(image,geometry.width,geometry.height,TriangleFilter, exception); if (tile_image == (Image *) NULL) return(MagickFalse); flags=ParseGeometry(page_geometry,&geometry_info); geometry.width=(size_t) geometry_info.rho; geometry.height=(size_t) geometry_info.sigma; if ((flags & SigmaValue) == 0) geometry.height=geometry.width; if ((tile_image->columns != geometry.width) || (tile_image->rows != geometry.height)) { Image *bordered_image; RectangleInfo border_info; /* Put a border around the image. 
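      Pad the resized tile out to the requested page geometry so the picture
      stays centered; any remaining size difference is absorbed by the
      resize that follows.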
*/ border_info.width=(geometry.width-tile_image->columns+1) >> 1; border_info.height=(geometry.height-tile_image->rows+1) >> 1; bordered_image=BorderImage(tile_image,&border_info,image->compose, exception); if (bordered_image == (Image *) NULL) return(MagickFalse); tile_image=DestroyImage(tile_image); tile_image=bordered_image; } if ((tile_image->columns != tile_columns) || (tile_image->rows != tile_rows)) { Image *resize_image; resize_image=ResizeImage(tile_image,tile_columns,tile_rows, tile_image->filter,exception); if (resize_image != (Image *) NULL) { tile_image=DestroyImage(tile_image); tile_image=resize_image; } } (void) TransformImageColorspace(tile_image,YCCColorspace,exception); downsample_image=ResizeImage(tile_image,tile_image->columns/2, tile_image->rows/2,TriangleFilter,exception); if (downsample_image == (Image *) NULL) return(MagickFalse); /* Write tile to PCD file. */ for (y=0; y < (ssize_t) tile_image->rows; y+=2) { p=GetVirtualPixels(tile_image,0,y,tile_image->columns,2,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) (tile_image->columns << 1); x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(tile_image,p))); p+=GetPixelChannels(tile_image); } q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) downsample_image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar( GetPixelGreen(tile_image,q))); q+=GetPixelChannels(tile_image); } q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) downsample_image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar( GetPixelBlue(tile_image,q))); q+=GetPixelChannels(tile_image); } status=SetImageProgress(image,SaveImageTag,y,tile_image->rows); if (status == MagickFalse) break; } for (i=0; i < 0x800; i++) (void) WriteBlobByte(image,'\0'); downsample_image=DestroyImage(downsample_image); tile_image=DestroyImage(tile_image); return(MagickTrue); } static MagickBooleanType WritePCDImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { Image *pcd_image; MagickBooleanType status; register ssize_t i; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); pcd_image=image; if (image->columns < image->rows) { Image *rotate_image; /* Rotate portrait to landscape. */ rotate_image=RotateImage(image,90.0,exception); if (rotate_image == (Image *) NULL) return(MagickFalse); pcd_image=rotate_image; DestroyBlob(rotate_image); pcd_image->blob=ReferenceBlob(image->blob); } /* Open output image file. */ status=OpenBlob(image_info,pcd_image,WriteBinaryBlobMode,exception); if (status == MagickFalse) { if (pcd_image != image) pcd_image=DestroyImage(pcd_image); return(status); } if (IssRGBCompatibleColorspace(pcd_image->colorspace) == MagickFalse) (void) TransformImageColorspace(pcd_image,sRGBColorspace,exception); /* Write PCD image header. 
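      The header occupies three 0x800-byte sectors: flag bytes and padding,
      the "PCD_IPI" signature at offset 0x800, and the orientation byte at
      offset 0x0e02 (the same byte ReadPCDImage() consults when rotating).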
*/ for (i=0; i < 32; i++) (void) WriteBlobByte(pcd_image,0xff); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x0e); for (i=0; i < 8; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x01); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x05); for (i=0; i < 8; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x0A); for (i=0; i < 36; i++) (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < 4; i++) (void) WriteBlobByte(pcd_image,0x01); for (i=0; i < 1944; i++) (void) WriteBlobByte(pcd_image,'\0'); (void) WriteBlob(pcd_image,7,(const unsigned char *) "PCD_IPI"); (void) WriteBlobByte(pcd_image,0x06); for (i=0; i < 1530; i++) (void) WriteBlobByte(pcd_image,'\0'); if (image->columns < image->rows) (void) WriteBlobByte(pcd_image,'\1'); else (void) WriteBlobByte(pcd_image,'\0'); for (i=0; i < (3*0x800-1539); i++) (void) WriteBlobByte(pcd_image,'\0'); /* Write PCD tiles. */ status=WritePCDTile(pcd_image,"768x512>",192,128,exception); status=WritePCDTile(pcd_image,"768x512>",384,256,exception); status=WritePCDTile(pcd_image,"768x512>",768,512,exception); (void) CloseBlob(pcd_image); if (pcd_image != image) pcd_image=DestroyImage(pcd_image); return(status); }
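/*
  Usage sketch (illustrative only; these are the generic MagickCore calls, not
  part of this coder, and "photo.pcd" is just a placeholder filename): once
  RegisterPCDImage() has run, a Photo CD file is decoded through the standard
  reader, which dispatches to ReadPCDImage() above:

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
    (void) CopyMagickString(image_info->filename, "photo.pcd", MagickPathExtent);
    Image *image = ReadImage(image_info, exception);
    ...
    image = DestroyImageList(image);
    image_info = DestroyImageInfo(image_info);
    exception = DestroyExceptionInfo(exception);
*/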
/*- * Copyright (c) 2004-2013 Tim Kientzle * Copyright (c) 2011-2012,2014 Michihiro NAKAJIMA * Copyright (c) 2013 Konrad Kleine * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_zip.c 201102 2009-12-28 03:11:36Z kientzle $"); /* * The definitive documentation of the Zip file format is: * http://www.pkware.com/documents/casestudies/APPNOTE.TXT * * The Info-Zip project has pioneered various extensions to better * support Zip on Unix, including the 0x5455 "UT", 0x5855 "UX", 0x7855 * "Ux", and 0x7875 "ux" extensions for time and ownership * information. * * History of this code: The streaming Zip reader was first added to * libarchive in January 2005. Support for seekable input sources was * added in Nov 2011. Zip64 support (including a significant code * refactoring) was added in 2014. */ #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_ZLIB_H #include <zlib.h> #endif #ifdef HAVE_BZLIB_H #include <bzlib.h> #endif #ifdef HAVE_LZMA_H #include <lzma.h> #endif #include "archive.h" #include "archive_digest_private.h" #include "archive_cryptor_private.h" #include "archive_endian.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_hmac_private.h" #include "archive_private.h" #include "archive_rb.h" #include "archive_read_private.h" #include "archive_ppmd8_private.h" #ifndef HAVE_ZLIB_H #include "archive_crc32.h" #endif struct zip_entry { struct archive_rb_node node; struct zip_entry *next; int64_t local_header_offset; int64_t compressed_size; int64_t uncompressed_size; int64_t gid; int64_t uid; struct archive_string rsrcname; time_t mtime; time_t atime; time_t ctime; uint32_t crc32; uint16_t mode; uint16_t zip_flags; /* From GP Flags Field */ unsigned char compression; unsigned char system; /* From "version written by" */ unsigned char flags; /* Our extra markers. */ unsigned char decdat;/* Used for Decryption check */ /* WinZip AES encryption extra field should be available * when compression is 99. */ struct { /* Vendor version: AE-1 - 0x0001, AE-2 - 0x0002 */ unsigned vendor; #define AES_VENDOR_AE_1 0x0001 #define AES_VENDOR_AE_2 0x0002 /* AES encryption strength: * 1 - 128 bits, 2 - 192 bits, 2 - 256 bits. 
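	 * (Note: per the WinZip AE-1/AE-2 specification the strength codes
	 * are 1 = 128-bit, 2 = 192-bit and 3 = 256-bit keys.)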
*/ unsigned strength; /* Actual compression method. */ unsigned char compression; } aes_extra; }; struct trad_enc_ctx { uint32_t keys[3]; }; /* Bits used in zip_flags. */ #define ZIP_ENCRYPTED (1 << 0) #define ZIP_LENGTH_AT_END (1 << 3) #define ZIP_STRONG_ENCRYPTED (1 << 6) #define ZIP_UTF8_NAME (1 << 11) /* See "7.2 Single Password Symmetric Encryption Method" in http://www.pkware.com/documents/casestudies/APPNOTE.TXT */ #define ZIP_CENTRAL_DIRECTORY_ENCRYPTED (1 << 13) /* Bits used in flags. */ #define LA_USED_ZIP64 (1 << 0) #define LA_FROM_CENTRAL_DIRECTORY (1 << 1) /* * See "WinZip - AES Encryption Information" * http://www.winzip.com/aes_info.htm */ /* Value used in compression method. */ #define WINZIP_AES_ENCRYPTION 99 /* Authentication code size. */ #define AUTH_CODE_SIZE 10 /**/ #define MAX_DERIVED_KEY_BUF_SIZE (AES_MAX_KEY_SIZE * 2 + 2) struct zip { /* Structural information about the archive. */ struct archive_string format_name; int64_t central_directory_offset; size_t central_directory_entries_total; size_t central_directory_entries_on_this_disk; int has_encrypted_entries; /* List of entries (seekable Zip only) */ struct zip_entry *zip_entries; struct archive_rb_tree tree; struct archive_rb_tree tree_rsrc; /* Bytes read but not yet consumed via __archive_read_consume() */ size_t unconsumed; /* Information about entry we're currently reading. */ struct zip_entry *entry; int64_t entry_bytes_remaining; /* These count the number of bytes actually read for the entry. */ int64_t entry_compressed_bytes_read; int64_t entry_uncompressed_bytes_read; /* Running CRC32 of the decompressed data */ unsigned long entry_crc32; unsigned long (*crc32func)(unsigned long, const void *, size_t); char ignore_crc32; /* Flags to mark progress of decompression. */ char decompress_init; char end_of_entry; unsigned char *uncompressed_buffer; size_t uncompressed_buffer_size; #ifdef HAVE_ZLIB_H z_stream stream; char stream_valid; #endif #if HAVE_LZMA_H && HAVE_LIBLZMA lzma_stream zipx_lzma_stream; char zipx_lzma_valid; #endif #ifdef HAVE_BZLIB_H bz_stream bzstream; char bzstream_valid; #endif IByteIn zipx_ppmd_stream; ssize_t zipx_ppmd_read_compressed; CPpmd8 ppmd8; char ppmd8_valid; char ppmd8_stream_failed; struct archive_string_conv *sconv; struct archive_string_conv *sconv_default; struct archive_string_conv *sconv_utf8; int init_default_conversion; int process_mac_extensions; char init_decryption; /* Decryption buffer. */ /* * The decrypted data starts at decrypted_ptr and * extends for decrypted_bytes_remaining. Decryption * adds new data to the end of this block, data is returned * to clients from the beginning. When the block hits the * end of decrypted_buffer, it has to be shuffled back to * the beginning of the buffer. */ unsigned char *decrypted_buffer; unsigned char *decrypted_ptr; size_t decrypted_buffer_size; size_t decrypted_bytes_remaining; size_t decrypted_unconsumed_bytes; /* Traditional PKWARE decryption. */ struct trad_enc_ctx tctx; char tctx_valid; /* WinZip AES decryption. */ /* Contexts used for AES decryption. */ archive_crypto_ctx cctx; char cctx_valid; archive_hmac_sha1_ctx hctx; char hctx_valid; /* Strong encryption's decryption header information. */ unsigned iv_size; unsigned alg_id; unsigned bit_len; unsigned flags; unsigned erd_size; unsigned v_size; unsigned v_crc32; uint8_t *iv; uint8_t *erd; uint8_t *v_data; }; /* Many systems define min or MIN, but not all. */ #define zipmin(a,b) ((a) < (b) ? 
(a) : (b)) /* This function is used by Ppmd8_DecodeSymbol during decompression of Ppmd8 * streams inside ZIP files. It has 2 purposes: one is to fetch the next * compressed byte from the stream, second one is to increase the counter how * many compressed bytes were read. */ static Byte ppmd_read(void* p) { /* Get the handle to current decompression context. */ struct archive_read *a = ((IByteIn*)p)->a; struct zip *zip = (struct zip*) a->format->data; ssize_t bytes_avail = 0; /* Fetch next byte. */ const uint8_t* data = __archive_read_ahead(a, 1, &bytes_avail); if(bytes_avail < 1) { zip->ppmd8_stream_failed = 1; return 0; } __archive_read_consume(a, 1); /* Increment the counter. */ ++zip->zipx_ppmd_read_compressed; /* Return the next compressed byte. */ return data[0]; } /* ------------------------------------------------------------------------ */ /* Traditional PKWARE Decryption functions. */ static void trad_enc_update_keys(struct trad_enc_ctx *ctx, uint8_t c) { uint8_t t; #define CRC32(c, b) (crc32(c ^ 0xffffffffUL, &b, 1) ^ 0xffffffffUL) ctx->keys[0] = CRC32(ctx->keys[0], c); ctx->keys[1] = (ctx->keys[1] + (ctx->keys[0] & 0xff)) * 134775813L + 1; t = (ctx->keys[1] >> 24) & 0xff; ctx->keys[2] = CRC32(ctx->keys[2], t); #undef CRC32 } static uint8_t trad_enc_decrypt_byte(struct trad_enc_ctx *ctx) { unsigned temp = ctx->keys[2] | 2; return (uint8_t)((temp * (temp ^ 1)) >> 8) & 0xff; } static void trad_enc_decrypt_update(struct trad_enc_ctx *ctx, const uint8_t *in, size_t in_len, uint8_t *out, size_t out_len) { unsigned i, max; max = (unsigned)((in_len < out_len)? in_len: out_len); for (i = 0; i < max; i++) { uint8_t t = in[i] ^ trad_enc_decrypt_byte(ctx); out[i] = t; trad_enc_update_keys(ctx, t); } } static int trad_enc_init(struct trad_enc_ctx *ctx, const char *pw, size_t pw_len, const uint8_t *key, size_t key_len, uint8_t *crcchk) { uint8_t header[12]; if (key_len < 12) { *crcchk = 0xff; return -1; } ctx->keys[0] = 305419896L; ctx->keys[1] = 591751049L; ctx->keys[2] = 878082192L; for (;pw_len; --pw_len) trad_enc_update_keys(ctx, *pw++); trad_enc_decrypt_update(ctx, key, 12, header, 12); /* Return the last byte for CRC check. */ *crcchk = header[11]; return 0; } #if 0 static void crypt_derive_key_sha1(const void *p, int size, unsigned char *key, int key_size) { #define MD_SIZE 20 archive_sha1_ctx ctx; unsigned char md1[MD_SIZE]; unsigned char md2[MD_SIZE * 2]; unsigned char mkb[64]; int i; archive_sha1_init(&ctx); archive_sha1_update(&ctx, p, size); archive_sha1_final(&ctx, md1); memset(mkb, 0x36, sizeof(mkb)); for (i = 0; i < MD_SIZE; i++) mkb[i] ^= md1[i]; archive_sha1_init(&ctx); archive_sha1_update(&ctx, mkb, sizeof(mkb)); archive_sha1_final(&ctx, md2); memset(mkb, 0x5C, sizeof(mkb)); for (i = 0; i < MD_SIZE; i++) mkb[i] ^= md1[i]; archive_sha1_init(&ctx); archive_sha1_update(&ctx, mkb, sizeof(mkb)); archive_sha1_final(&ctx, md2 + MD_SIZE); if (key_size > 32) key_size = 32; memcpy(key, md2, key_size); #undef MD_SIZE } #endif /* * Common code for streaming or seeking modes. * * Includes code to read local file headers, decompress data * from entry bodies, and common API. */ static unsigned long real_crc32(unsigned long crc, const void *buff, size_t len) { return crc32(crc, buff, (unsigned int)len); } /* Used by "ignorecrc32" option to speed up tests. 
*/ static unsigned long fake_crc32(unsigned long crc, const void *buff, size_t len) { (void)crc; /* UNUSED */ (void)buff; /* UNUSED */ (void)len; /* UNUSED */ return 0; } static const struct { int id; const char * name; } compression_methods[] = { {0, "uncompressed"}, /* The file is stored (no compression) */ {1, "shrinking"}, /* The file is Shrunk */ {2, "reduced-1"}, /* The file is Reduced with compression factor 1 */ {3, "reduced-2"}, /* The file is Reduced with compression factor 2 */ {4, "reduced-3"}, /* The file is Reduced with compression factor 3 */ {5, "reduced-4"}, /* The file is Reduced with compression factor 4 */ {6, "imploded"}, /* The file is Imploded */ {7, "reserved"}, /* Reserved for Tokenizing compression algorithm */ {8, "deflation"}, /* The file is Deflated */ {9, "deflation-64-bit"}, /* Enhanced Deflating using Deflate64(tm) */ {10, "ibm-terse"},/* PKWARE Data Compression Library Imploding * (old IBM TERSE) */ {11, "reserved"}, /* Reserved by PKWARE */ {12, "bzip"}, /* File is compressed using BZIP2 algorithm */ {13, "reserved"}, /* Reserved by PKWARE */ {14, "lzma"}, /* LZMA (EFS) */ {15, "reserved"}, /* Reserved by PKWARE */ {16, "reserved"}, /* Reserved by PKWARE */ {17, "reserved"}, /* Reserved by PKWARE */ {18, "ibm-terse-new"}, /* File is compressed using IBM TERSE (new) */ {19, "ibm-lz777"},/* IBM LZ77 z Architecture (PFS) */ {95, "xz"}, /* XZ compressed data */ {96, "jpeg"}, /* JPEG compressed data */ {97, "wav-pack"}, /* WavPack compressed data */ {98, "ppmd-1"}, /* PPMd version I, Rev 1 */ {99, "aes"} /* WinZip AES encryption */ }; static const char * compression_name(const int compression) { static const int num_compression_methods = sizeof(compression_methods)/sizeof(compression_methods[0]); int i=0; while(compression >= 0 && i < num_compression_methods) { if (compression_methods[i].id == compression) return compression_methods[i].name; i++; } return "??"; } /* Convert an MSDOS-style date/time into Unix-style time. */ static time_t zip_time(const char *p) { int msTime, msDate; struct tm ts; msTime = (0xff & (unsigned)p[0]) + 256 * (0xff & (unsigned)p[1]); msDate = (0xff & (unsigned)p[2]) + 256 * (0xff & (unsigned)p[3]); memset(&ts, 0, sizeof(ts)); ts.tm_year = ((msDate >> 9) & 0x7f) + 80; /* Years since 1900. */ ts.tm_mon = ((msDate >> 5) & 0x0f) - 1; /* Month number. */ ts.tm_mday = msDate & 0x1f; /* Day of month. */ ts.tm_hour = (msTime >> 11) & 0x1f; ts.tm_min = (msTime >> 5) & 0x3f; ts.tm_sec = (msTime << 1) & 0x3e; ts.tm_isdst = -1; return mktime(&ts); } /* * The extra data is stored as a list of * id1+size1+data1 + id2+size2+data2 ... * triplets. id and size are 2 bytes each. 
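 * For example, a complete Zip64 (id 0x0001) extra field carrying an 8-byte
 * uncompressed size followed by an 8-byte compressed size would appear,
 * little-endian, as: 01 00 | 10 00 | <8-byte usize> <8-byte csize>.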
*/ static int process_extra(struct archive_read *a, const char *p, size_t extra_length, struct zip_entry* zip_entry) { unsigned offset = 0; if (extra_length == 0) { return ARCHIVE_OK; } if (extra_length < 4) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Too-small extra data: Need at least 4 bytes, but only found %d bytes", (int)extra_length); return ARCHIVE_FAILED; } while (offset <= extra_length - 4) { unsigned short headerid = archive_le16dec(p + offset); unsigned short datasize = archive_le16dec(p + offset + 2); offset += 4; if (offset + datasize > extra_length) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Extra data overflow: Need %d bytes but only found %d bytes", (int)datasize, (int)(extra_length - offset)); return ARCHIVE_FAILED; } #ifdef DEBUG fprintf(stderr, "Header id 0x%04x, length %d\n", headerid, datasize); #endif switch (headerid) { case 0x0001: /* Zip64 extended information extra field. */ zip_entry->flags |= LA_USED_ZIP64; if (zip_entry->uncompressed_size == 0xffffffff) { uint64_t t = 0; if (datasize < 8 || (t = archive_le64dec(p + offset)) > INT64_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed 64-bit uncompressed size"); return ARCHIVE_FAILED; } zip_entry->uncompressed_size = t; offset += 8; datasize -= 8; } if (zip_entry->compressed_size == 0xffffffff) { uint64_t t = 0; if (datasize < 8 || (t = archive_le64dec(p + offset)) > INT64_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed 64-bit compressed size"); return ARCHIVE_FAILED; } zip_entry->compressed_size = t; offset += 8; datasize -= 8; } if (zip_entry->local_header_offset == 0xffffffff) { uint64_t t = 0; if (datasize < 8 || (t = archive_le64dec(p + offset)) > INT64_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed 64-bit local header offset"); return ARCHIVE_FAILED; } zip_entry->local_header_offset = t; offset += 8; datasize -= 8; } /* archive_le32dec(p + offset) gives disk * on which file starts, but we don't handle * multi-volume Zip files. */ break; #ifdef DEBUG case 0x0017: { /* Strong encryption field. */ if (archive_le16dec(p + offset) == 2) { unsigned algId = archive_le16dec(p + offset + 2); unsigned bitLen = archive_le16dec(p + offset + 4); int flags = archive_le16dec(p + offset + 6); fprintf(stderr, "algId=0x%04x, bitLen=%u, " "flgas=%d\n", algId, bitLen,flags); } break; } #endif case 0x5455: { /* Extended time field "UT". */ int flags; if (datasize == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Incomplete extended time field"); return ARCHIVE_FAILED; } flags = p[offset]; offset++; datasize--; /* Flag bits indicate which dates are present. */ if (flags & 0x01) { #ifdef DEBUG fprintf(stderr, "mtime: %lld -> %d\n", (long long)zip_entry->mtime, archive_le32dec(p + offset)); #endif if (datasize < 4) break; zip_entry->mtime = archive_le32dec(p + offset); offset += 4; datasize -= 4; } if (flags & 0x02) { if (datasize < 4) break; zip_entry->atime = archive_le32dec(p + offset); offset += 4; datasize -= 4; } if (flags & 0x04) { if (datasize < 4) break; zip_entry->ctime = archive_le32dec(p + offset); offset += 4; datasize -= 4; } break; } case 0x5855: { /* Info-ZIP Unix Extra Field (old version) "UX". 
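 * Layout: 4-byte access time, 4-byte modification time, then optional
 * 2-byte uid and 2-byte gid, all little-endian.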
*/ if (datasize >= 8) { zip_entry->atime = archive_le32dec(p + offset); zip_entry->mtime = archive_le32dec(p + offset + 4); } if (datasize >= 12) { zip_entry->uid = archive_le16dec(p + offset + 8); zip_entry->gid = archive_le16dec(p + offset + 10); } break; } case 0x6c78: { /* Experimental 'xl' field */ /* * Introduced Dec 2013 to provide a way to * include external file attributes (and other * fields that ordinarily appear only in * central directory) in local file header. * This provides file type and permission * information necessary to support full * streaming extraction. Currently being * discussed with other Zip developers * ... subject to change. * * Format: * The field starts with a bitmap that specifies * which additional fields are included. The * bitmap is variable length and can be extended in * the future. * * n bytes - feature bitmap: first byte has low-order * 7 bits. If high-order bit is set, a subsequent * byte holds the next 7 bits, etc. * * if bitmap & 1, 2 byte "version made by" * if bitmap & 2, 2 byte "internal file attributes" * if bitmap & 4, 4 byte "external file attributes" * if bitmap & 8, 2 byte comment length + n byte comment */ int bitmap, bitmap_last; if (datasize < 1) break; bitmap_last = bitmap = 0xff & p[offset]; offset += 1; datasize -= 1; /* We only support first 7 bits of bitmap; skip rest. */ while ((bitmap_last & 0x80) != 0 && datasize >= 1) { bitmap_last = p[offset]; offset += 1; datasize -= 1; } if (bitmap & 1) { /* 2 byte "version made by" */ if (datasize < 2) break; zip_entry->system = archive_le16dec(p + offset) >> 8; offset += 2; datasize -= 2; } if (bitmap & 2) { /* 2 byte "internal file attributes" */ uint32_t internal_attributes; if (datasize < 2) break; internal_attributes = archive_le16dec(p + offset); /* Not used by libarchive at present. */ (void)internal_attributes; /* UNUSED */ offset += 2; datasize -= 2; } if (bitmap & 4) { /* 4 byte "external file attributes" */ uint32_t external_attributes; if (datasize < 4) break; external_attributes = archive_le32dec(p + offset); if (zip_entry->system == 3) { zip_entry->mode = external_attributes >> 16; } else if (zip_entry->system == 0) { // Interpret MSDOS directory bit if (0x10 == (external_attributes & 0x10)) { zip_entry->mode = AE_IFDIR | 0775; } else { zip_entry->mode = AE_IFREG | 0664; } if (0x01 == (external_attributes & 0x01)) { // Read-only bit; strip write permissions zip_entry->mode &= 0555; } } else { zip_entry->mode = 0; } offset += 4; datasize -= 4; } if (bitmap & 8) { /* 2 byte comment length + comment */ uint32_t comment_length; if (datasize < 2) break; comment_length = archive_le16dec(p + offset); offset += 2; datasize -= 2; if (datasize < comment_length) break; /* Comment is not supported by libarchive */ offset += comment_length; datasize -= comment_length; } break; } case 0x7855: /* Info-ZIP Unix Extra Field (type 2) "Ux". */ #ifdef DEBUG fprintf(stderr, "uid %d gid %d\n", archive_le16dec(p + offset), archive_le16dec(p + offset + 2)); #endif if (datasize >= 2) zip_entry->uid = archive_le16dec(p + offset); if (datasize >= 4) zip_entry->gid = archive_le16dec(p + offset + 2); break; case 0x7875: { /* Info-Zip Unix Extra Field (type 3) "ux". */ int uidsize = 0, gidsize = 0; /* TODO: support arbitrary uidsize/gidsize. */ if (datasize >= 1 && p[offset] == 1) {/* version=1 */ if (datasize >= 4) { /* get a uid size. 
*/ uidsize = 0xff & (int)p[offset+1]; if (uidsize == 2) zip_entry->uid = archive_le16dec( p + offset + 2); else if (uidsize == 4 && datasize >= 6) zip_entry->uid = archive_le32dec( p + offset + 2); } if (datasize >= (2 + uidsize + 3)) { /* get a gid size. */ gidsize = 0xff & (int)p[offset+2+uidsize]; if (gidsize == 2) zip_entry->gid = archive_le16dec( p+offset+2+uidsize+1); else if (gidsize == 4 && datasize >= (2 + uidsize + 5)) zip_entry->gid = archive_le32dec( p+offset+2+uidsize+1); } } break; } case 0x9901: /* WinZip AES extra data field. */ if (datasize < 6) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Incomplete AES field"); return ARCHIVE_FAILED; } if (p[offset + 2] == 'A' && p[offset + 3] == 'E') { /* Vendor version. */ zip_entry->aes_extra.vendor = archive_le16dec(p + offset); /* AES encryption strength. */ zip_entry->aes_extra.strength = p[offset + 4]; /* Actual compression method. */ zip_entry->aes_extra.compression = p[offset + 5]; } break; default: break; } offset += datasize; } if (offset != extra_length) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed extra data: Consumed %d bytes of %d bytes", (int)offset, (int)extra_length); return ARCHIVE_FAILED; } return ARCHIVE_OK; } /* * Assumes file pointer is at beginning of local file header. */ static int zip_read_local_file_header(struct archive_read *a, struct archive_entry *entry, struct zip *zip) { const char *p; const void *h; const wchar_t *wp; const char *cp; size_t len, filename_length, extra_length; struct archive_string_conv *sconv; struct zip_entry *zip_entry = zip->entry; struct zip_entry zip_entry_central_dir; int ret = ARCHIVE_OK; char version; /* Save a copy of the original for consistency checks. */ zip_entry_central_dir = *zip_entry; zip->decompress_init = 0; zip->end_of_entry = 0; zip->entry_uncompressed_bytes_read = 0; zip->entry_compressed_bytes_read = 0; zip->entry_crc32 = zip->crc32func(0, NULL, 0); /* Setup default conversion. */ if (zip->sconv == NULL && !zip->init_default_conversion) { zip->sconv_default = archive_string_default_conversion_for_read(&(a->archive)); zip->init_default_conversion = 1; } if ((p = __archive_read_ahead(a, 30, NULL)) == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); return (ARCHIVE_FATAL); } if (memcmp(p, "PK\003\004", 4) != 0) { archive_set_error(&a->archive, -1, "Damaged Zip archive"); return ARCHIVE_FATAL; } version = p[4]; zip_entry->system = p[5]; zip_entry->zip_flags = archive_le16dec(p + 6); if (zip_entry->zip_flags & (ZIP_ENCRYPTED | ZIP_STRONG_ENCRYPTED)) { zip->has_encrypted_entries = 1; archive_entry_set_is_data_encrypted(entry, 1); if (zip_entry->zip_flags & ZIP_CENTRAL_DIRECTORY_ENCRYPTED && zip_entry->zip_flags & ZIP_ENCRYPTED && zip_entry->zip_flags & ZIP_STRONG_ENCRYPTED) { archive_entry_set_is_metadata_encrypted(entry, 1); return ARCHIVE_FATAL; } } zip->init_decryption = (zip_entry->zip_flags & ZIP_ENCRYPTED); zip_entry->compression = (char)archive_le16dec(p + 8); zip_entry->mtime = zip_time(p + 10); zip_entry->crc32 = archive_le32dec(p + 14); if (zip_entry->zip_flags & ZIP_LENGTH_AT_END) zip_entry->decdat = p[11]; else zip_entry->decdat = p[17]; zip_entry->compressed_size = archive_le32dec(p + 18); zip_entry->uncompressed_size = archive_le32dec(p + 22); filename_length = archive_le16dec(p + 26); extra_length = archive_le16dec(p + 28); __archive_read_consume(a, 30); /* Read the filename. 
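 * The name occupies filename_length bytes immediately after the fixed
 * 30-byte local file header; when general-purpose flag bit 11
 * (ZIP_UTF8_NAME) is set it is treated as UTF-8, otherwise the configured
 * or default character-set conversion applies.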
*/ if ((h = __archive_read_ahead(a, filename_length, NULL)) == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); return (ARCHIVE_FATAL); } if (zip_entry->zip_flags & ZIP_UTF8_NAME) { /* The filename is stored to be UTF-8. */ if (zip->sconv_utf8 == NULL) { zip->sconv_utf8 = archive_string_conversion_from_charset( &a->archive, "UTF-8", 1); if (zip->sconv_utf8 == NULL) return (ARCHIVE_FATAL); } sconv = zip->sconv_utf8; } else if (zip->sconv != NULL) sconv = zip->sconv; else sconv = zip->sconv_default; if (archive_entry_copy_pathname_l(entry, h, filename_length, sconv) != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name(sconv)); ret = ARCHIVE_WARN; } __archive_read_consume(a, filename_length); /* Read the extra data. */ if ((h = __archive_read_ahead(a, extra_length, NULL)) == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); return (ARCHIVE_FATAL); } if (ARCHIVE_OK != process_extra(a, h, extra_length, zip_entry)) { return ARCHIVE_FATAL; } __archive_read_consume(a, extra_length); /* Work around a bug in Info-Zip: When reading from a pipe, it * stats the pipe instead of synthesizing a file entry. */ if ((zip_entry->mode & AE_IFMT) == AE_IFIFO) { zip_entry->mode &= ~ AE_IFMT; zip_entry->mode |= AE_IFREG; } /* If the mode is totally empty, set some sane default. */ if (zip_entry->mode == 0) { zip_entry->mode |= 0664; } /* Windows archivers sometimes use backslash as the directory separator. Normalize to slash. */ if (zip_entry->system == 0 && (wp = archive_entry_pathname_w(entry)) != NULL) { if (wcschr(wp, L'/') == NULL && wcschr(wp, L'\\') != NULL) { size_t i; struct archive_wstring s; archive_string_init(&s); archive_wstrcpy(&s, wp); for (i = 0; i < archive_strlen(&s); i++) { if (s.s[i] == '\\') s.s[i] = '/'; } archive_entry_copy_pathname_w(entry, s.s); archive_wstring_free(&s); } } /* Make sure that entries with a trailing '/' are marked as directories * even if the External File Attributes contains bogus values. If this * is not a directory and there is no type, assume regularfile. */ if ((zip_entry->mode & AE_IFMT) != AE_IFDIR) { int has_slash; wp = archive_entry_pathname_w(entry); if (wp != NULL) { len = wcslen(wp); has_slash = len > 0 && wp[len - 1] == L'/'; } else { cp = archive_entry_pathname(entry); len = (cp != NULL)?strlen(cp):0; has_slash = len > 0 && cp[len - 1] == '/'; } /* Correct file type as needed. 
*/ if (has_slash) { zip_entry->mode &= ~AE_IFMT; zip_entry->mode |= AE_IFDIR; zip_entry->mode |= 0111; } else if ((zip_entry->mode & AE_IFMT) == 0) { zip_entry->mode |= AE_IFREG; } } /* Make sure directories end in '/' */ if ((zip_entry->mode & AE_IFMT) == AE_IFDIR) { wp = archive_entry_pathname_w(entry); if (wp != NULL) { len = wcslen(wp); if (len > 0 && wp[len - 1] != L'/') { struct archive_wstring s; archive_string_init(&s); archive_wstrcat(&s, wp); archive_wstrappend_wchar(&s, L'/'); archive_entry_copy_pathname_w(entry, s.s); archive_wstring_free(&s); } } else { cp = archive_entry_pathname(entry); len = (cp != NULL)?strlen(cp):0; if (len > 0 && cp[len - 1] != '/') { struct archive_string s; archive_string_init(&s); archive_strcat(&s, cp); archive_strappend_char(&s, '/'); archive_entry_set_pathname(entry, s.s); archive_string_free(&s); } } } if (zip_entry->flags & LA_FROM_CENTRAL_DIRECTORY) { /* If this came from the central dir, it's size info * is definitive, so ignore the length-at-end flag. */ zip_entry->zip_flags &= ~ZIP_LENGTH_AT_END; /* If local header is missing a value, use the one from the central directory. If both have it, warn about mismatches. */ if (zip_entry->crc32 == 0) { zip_entry->crc32 = zip_entry_central_dir.crc32; } else if (!zip->ignore_crc32 && zip_entry->crc32 != zip_entry_central_dir.crc32) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Inconsistent CRC32 values"); ret = ARCHIVE_WARN; } if (zip_entry->compressed_size == 0) { zip_entry->compressed_size = zip_entry_central_dir.compressed_size; } else if (zip_entry->compressed_size != zip_entry_central_dir.compressed_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Inconsistent compressed size: " "%jd in central directory, %jd in local header", (intmax_t)zip_entry_central_dir.compressed_size, (intmax_t)zip_entry->compressed_size); ret = ARCHIVE_WARN; } if (zip_entry->uncompressed_size == 0) { zip_entry->uncompressed_size = zip_entry_central_dir.uncompressed_size; } else if (zip_entry->uncompressed_size != zip_entry_central_dir.uncompressed_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Inconsistent uncompressed size: " "%jd in central directory, %jd in local header", (intmax_t)zip_entry_central_dir.uncompressed_size, (intmax_t)zip_entry->uncompressed_size); ret = ARCHIVE_WARN; } } /* Populate some additional entry fields: */ archive_entry_set_mode(entry, zip_entry->mode); archive_entry_set_uid(entry, zip_entry->uid); archive_entry_set_gid(entry, zip_entry->gid); archive_entry_set_mtime(entry, zip_entry->mtime, 0); archive_entry_set_ctime(entry, zip_entry->ctime, 0); archive_entry_set_atime(entry, zip_entry->atime, 0); if ((zip->entry->mode & AE_IFMT) == AE_IFLNK) { size_t linkname_length; if (zip_entry->compressed_size > 64 * 1024) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Zip file with oversized link entry"); return ARCHIVE_FATAL; } linkname_length = (size_t)zip_entry->compressed_size; archive_entry_set_size(entry, 0); p = __archive_read_ahead(a, linkname_length, NULL); if (p == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Truncated Zip file"); return ARCHIVE_FATAL; } sconv = zip->sconv; if (sconv == NULL && (zip->entry->zip_flags & ZIP_UTF8_NAME)) sconv = zip->sconv_utf8; if (sconv == NULL) sconv = zip->sconv_default; if (archive_entry_copy_symlink_l(entry, p, linkname_length, sconv) != 0) { if (errno != ENOMEM && sconv == zip->sconv_utf8 && (zip->entry->zip_flags & ZIP_UTF8_NAME)) archive_entry_copy_symlink_l(entry, p, 
linkname_length, NULL); if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Symlink"); return (ARCHIVE_FATAL); } /* * Since there is no character-set regulation for * symlink name, do not report the conversion error * in an automatic conversion. */ if (sconv != zip->sconv_utf8 || (zip->entry->zip_flags & ZIP_UTF8_NAME) == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Symlink cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( sconv)); ret = ARCHIVE_WARN; } } zip_entry->uncompressed_size = zip_entry->compressed_size = 0; if (__archive_read_consume(a, linkname_length) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Read error skipping symlink target name"); return ARCHIVE_FATAL; } } else if (0 == (zip_entry->zip_flags & ZIP_LENGTH_AT_END) || zip_entry->uncompressed_size > 0) { /* Set the size only if it's meaningful. */ archive_entry_set_size(entry, zip_entry->uncompressed_size); } zip->entry_bytes_remaining = zip_entry->compressed_size; /* If there's no body, force read_data() to return EOF immediately. */ if (0 == (zip_entry->zip_flags & ZIP_LENGTH_AT_END) && zip->entry_bytes_remaining < 1) zip->end_of_entry = 1; /* Set up a more descriptive format name. */ archive_string_empty(&zip->format_name); archive_string_sprintf(&zip->format_name, "ZIP %d.%d (%s)", version / 10, version % 10, compression_name(zip->entry->compression)); a->archive.archive_format_name = zip->format_name.s; return (ret); } static int check_authentication_code(struct archive_read *a, const void *_p) { struct zip *zip = (struct zip *)(a->format->data); /* Check authentication code. */ if (zip->hctx_valid) { const void *p; uint8_t hmac[20]; size_t hmac_len = 20; int cmp; archive_hmac_sha1_final(&zip->hctx, hmac, &hmac_len); if (_p == NULL) { /* Read authentication code. */ p = __archive_read_ahead(a, AUTH_CODE_SIZE, NULL); if (p == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); } } else { p = _p; } cmp = memcmp(hmac, p, AUTH_CODE_SIZE); __archive_read_consume(a, AUTH_CODE_SIZE); if (cmp != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "ZIP bad Authentication code"); return (ARCHIVE_WARN); } } return (ARCHIVE_OK); } /* * Read "uncompressed" data. There are three cases: * 1) We know the size of the data. This is always true for the * seeking reader (we've examined the Central Directory already). * 2) ZIP_LENGTH_AT_END was set, but only the CRC was deferred. * Info-ZIP seems to do this; we know the size but have to grab * the CRC from the data descriptor afterwards. * 3) We're streaming and ZIP_LENGTH_AT_END was specified and * we have no size information. In this case, we can do pretty * well by watching for the data descriptor record. The data * descriptor is 16 bytes and includes a computed CRC that should * provide a strong check. * * TODO: Technically, the PK\007\010 signature is optional. * In the original spec, the data descriptor contained CRC * and size fields but had no leading signature. In practice, * newer writers seem to provide the signature pretty consistently. * * For uncompressed data, the PK\007\010 marker seems essential * to be sure we've actually seen the end of the entry. * * Returns ARCHIVE_OK if successful, ARCHIVE_FATAL otherwise, sets * zip->end_of_entry if it consumes all of the data. 
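 *
 * For reference, the descriptor searched for below is laid out as the
 * PK\007\010 signature (4 bytes), the CRC-32 (4 bytes), then the compressed
 * and uncompressed sizes (4 bytes each, or 8 bytes each when a Zip64 extra
 * field was seen), which is why 16 or 24 bytes are consumed at a time.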
*/ static int zip_read_data_none(struct archive_read *a, const void **_buff, size_t *size, int64_t *offset) { struct zip *zip; const char *buff; ssize_t bytes_avail; int r; (void)offset; /* UNUSED */ zip = (struct zip *)(a->format->data); if (zip->entry->zip_flags & ZIP_LENGTH_AT_END) { const char *p; ssize_t grabbing_bytes = 24; if (zip->hctx_valid) grabbing_bytes += AUTH_CODE_SIZE; /* Grab at least 24 bytes. */ buff = __archive_read_ahead(a, grabbing_bytes, &bytes_avail); if (bytes_avail < grabbing_bytes) { /* Zip archives have end-of-archive markers that are longer than this, so a failure to get at least 24 bytes really does indicate a truncated file. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); } /* Check for a complete PK\007\010 signature, followed * by the correct 4-byte CRC. */ p = buff; if (zip->hctx_valid) p += AUTH_CODE_SIZE; if (p[0] == 'P' && p[1] == 'K' && p[2] == '\007' && p[3] == '\010' && (archive_le32dec(p + 4) == zip->entry_crc32 || zip->ignore_crc32 || (zip->hctx_valid && zip->entry->aes_extra.vendor == AES_VENDOR_AE_2))) { if (zip->entry->flags & LA_USED_ZIP64) { uint64_t compressed, uncompressed; zip->entry->crc32 = archive_le32dec(p + 4); compressed = archive_le64dec(p + 8); uncompressed = archive_le64dec(p + 16); if (compressed > INT64_MAX || uncompressed > INT64_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Overflow of 64-bit file sizes"); return ARCHIVE_FAILED; } zip->entry->compressed_size = compressed; zip->entry->uncompressed_size = uncompressed; zip->unconsumed = 24; } else { zip->entry->crc32 = archive_le32dec(p + 4); zip->entry->compressed_size = archive_le32dec(p + 8); zip->entry->uncompressed_size = archive_le32dec(p + 12); zip->unconsumed = 16; } if (zip->hctx_valid) { r = check_authentication_code(a, buff); if (r != ARCHIVE_OK) return (r); } zip->end_of_entry = 1; return (ARCHIVE_OK); } /* If not at EOF, ensure we consume at least one byte. */ ++p; /* Scan forward until we see where a PK\007\010 signature * might be. */ /* Return bytes up until that point. On the next call, * the code above will verify the data descriptor. */ while (p < buff + bytes_avail - 4) { if (p[3] == 'P') { p += 3; } else if (p[3] == 'K') { p += 2; } else if (p[3] == '\007') { p += 1; } else if (p[3] == '\010' && p[2] == '\007' && p[1] == 'K' && p[0] == 'P') { if (zip->hctx_valid) p -= AUTH_CODE_SIZE; break; } else { p += 4; } } bytes_avail = p - buff; } else { if (zip->entry_bytes_remaining == 0) { zip->end_of_entry = 1; if (zip->hctx_valid) { r = check_authentication_code(a, NULL); if (r != ARCHIVE_OK) return (r); } return (ARCHIVE_OK); } /* Grab a bunch of bytes. 
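 * Asking the read-ahead layer for a single byte returns whatever it already
 * has buffered without forcing a copy; the count is then clamped to
 * entry_bytes_remaining below.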
*/ buff = __archive_read_ahead(a, 1, &bytes_avail); if (bytes_avail <= 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); } if (bytes_avail > zip->entry_bytes_remaining) bytes_avail = (ssize_t)zip->entry_bytes_remaining; } if (zip->tctx_valid || zip->cctx_valid) { size_t dec_size = bytes_avail; if (dec_size > zip->decrypted_buffer_size) dec_size = zip->decrypted_buffer_size; if (zip->tctx_valid) { trad_enc_decrypt_update(&zip->tctx, (const uint8_t *)buff, dec_size, zip->decrypted_buffer, dec_size); } else { size_t dsize = dec_size; archive_hmac_sha1_update(&zip->hctx, (const uint8_t *)buff, dec_size); archive_decrypto_aes_ctr_update(&zip->cctx, (const uint8_t *)buff, dec_size, zip->decrypted_buffer, &dsize); } bytes_avail = dec_size; buff = (const char *)zip->decrypted_buffer; } *size = bytes_avail; zip->entry_bytes_remaining -= bytes_avail; zip->entry_uncompressed_bytes_read += bytes_avail; zip->entry_compressed_bytes_read += bytes_avail; zip->unconsumed += bytes_avail; *_buff = buff; return (ARCHIVE_OK); } static int consume_optional_marker(struct archive_read *a, struct zip *zip) { if (zip->end_of_entry && (zip->entry->zip_flags & ZIP_LENGTH_AT_END)) { const char *p; if (NULL == (p = __archive_read_ahead(a, 24, NULL))) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP end-of-file record"); return (ARCHIVE_FATAL); } /* Consume the optional PK\007\010 marker. */ if (p[0] == 'P' && p[1] == 'K' && p[2] == '\007' && p[3] == '\010') { p += 4; zip->unconsumed = 4; } if (zip->entry->flags & LA_USED_ZIP64) { uint64_t compressed, uncompressed; zip->entry->crc32 = archive_le32dec(p); compressed = archive_le64dec(p + 4); uncompressed = archive_le64dec(p + 12); if (compressed > INT64_MAX || uncompressed > INT64_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Overflow of 64-bit file sizes"); return ARCHIVE_FAILED; } zip->entry->compressed_size = compressed; zip->entry->uncompressed_size = uncompressed; zip->unconsumed += 20; } else { zip->entry->crc32 = archive_le32dec(p); zip->entry->compressed_size = archive_le32dec(p + 4); zip->entry->uncompressed_size = archive_le32dec(p + 8); zip->unconsumed += 12; } } return (ARCHIVE_OK); } #if HAVE_LZMA_H && HAVE_LIBLZMA static int zipx_xz_init(struct archive_read *a, struct zip *zip) { lzma_ret r; if(zip->zipx_lzma_valid) { lzma_end(&zip->zipx_lzma_stream); zip->zipx_lzma_valid = 0; } memset(&zip->zipx_lzma_stream, 0, sizeof(zip->zipx_lzma_stream)); r = lzma_stream_decoder(&zip->zipx_lzma_stream, UINT64_MAX, 0); if (r != LZMA_OK) { archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC, "xz initialization failed(%d)", r); return (ARCHIVE_FAILED); } zip->zipx_lzma_valid = 1; free(zip->uncompressed_buffer); zip->uncompressed_buffer_size = 256 * 1024; zip->uncompressed_buffer = (uint8_t*) malloc(zip->uncompressed_buffer_size); if (zip->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for xz decompression"); return (ARCHIVE_FATAL); } zip->decompress_init = 1; return (ARCHIVE_OK); } static int zipx_lzma_alone_init(struct archive_read *a, struct zip *zip) { lzma_ret r; const uint8_t* p; #pragma pack(push) #pragma pack(1) struct _alone_header { uint8_t bytes[5]; uint64_t uncompressed_size; } alone_header; #pragma pack(pop) /* To unpack ZIPX's "LZMA" (id 14) stream we can use standard liblzma that * is a part of XZ Utils. 
The stream format stored inside ZIPX file is a * modified "lzma alone" file format, that was used by the `lzma` utility * which was later deprecated in favour of `xz` utility. Since those * formats are nearly the same, we can use a standard "lzma alone" decoder * from XZ Utils. */ memset(&zip->zipx_lzma_stream, 0, sizeof(zip->zipx_lzma_stream)); r = lzma_alone_decoder(&zip->zipx_lzma_stream, UINT64_MAX); if (r != LZMA_OK) { archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC, "lzma initialization failed(%d)", r); return (ARCHIVE_FAILED); } /* Flag the cleanup function that we want our lzma-related structures * to be freed later. */ zip->zipx_lzma_valid = 1; /* The "lzma alone" file format and the stream format inside ZIPx are * almost the same. Here's an example of a structure of "lzma alone" * format: * * $ cat /bin/ls | lzma | xxd | head -n 1 * 00000000: 5d00 0080 00ff ffff ffff ffff ff00 2814 * * 5 bytes 8 bytes n bytes * <lzma_params><uncompressed_size><data...> * * lzma_params is a 5-byte blob that has to be decoded to extract * parameters of this LZMA stream. The uncompressed_size field is an * uint64_t value that contains information about the size of the * uncompressed file, or UINT64_MAX if this value is unknown. The <data...> * part is the actual lzma-compressed data stream. * * Now here's the structure of the stream inside the ZIPX file: * * $ cat stream_inside_zipx | xxd | head -n 1 * 00000000: 0914 0500 5d00 8000 0000 2814 .... .... * * 2byte 2byte 5 bytes n bytes * <magic1><magic2><lzma_params><data...> * * This means that the ZIPX file contains an additional magic1 and magic2 * headers, the lzma_params field contains the same parameter set as in the * "lzma alone" format, and the <data...> field is the same as in the "lzma * alone" format as well. Note that also the zipx format is missing the * uncompressed_size field. * * So, in order to use the "lzma alone" decoder for the zipx lzma stream, * we simply need to shuffle around some fields, prepare a new lzma alone * header, feed it into lzma alone decoder so it will initialize itself * properly, and then we can start feeding normal zipx lzma stream into the * decoder. */ /* Read magic1,magic2,lzma_params from the ZIPX stream. */ if((p = __archive_read_ahead(a, 9, NULL)) == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated lzma data"); return (ARCHIVE_FATAL); } if(p[2] != 0x05 || p[3] != 0x00) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid lzma data"); return (ARCHIVE_FATAL); } /* Prepare an lzma alone header: copy the lzma_params blob into a proper * place into the lzma alone header. */ memcpy(&alone_header.bytes[0], p + 4, 5); /* Initialize the 'uncompressed size' field to unknown; we'll manually * monitor how many bytes there are still to be uncompressed. */ alone_header.uncompressed_size = UINT64_MAX; if(!zip->uncompressed_buffer) { zip->uncompressed_buffer_size = 256 * 1024; zip->uncompressed_buffer = (uint8_t*) malloc(zip->uncompressed_buffer_size); if (zip->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for lzma decompression"); return (ARCHIVE_FATAL); } } zip->zipx_lzma_stream.next_in = (void*) &alone_header; zip->zipx_lzma_stream.avail_in = sizeof(alone_header); zip->zipx_lzma_stream.total_in = 0; zip->zipx_lzma_stream.next_out = zip->uncompressed_buffer; zip->zipx_lzma_stream.avail_out = zip->uncompressed_buffer_size; zip->zipx_lzma_stream.total_out = 0; /* Feed only the header into the lzma alone decoder. 
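 *
 * For reference, the synthesized header fed here is the 13-byte packed
 * struct built above:
 *
 *    offset 0, 5 bytes : lzma_params, copied verbatim from p + 4
 *    offset 5, 8 bytes : uncompressed_size, forced to UINT64_MAX (unknown)
 *
 * so avail_in is sizeof(alone_header) == 13 thanks to the pack(1) pragma.
 *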
This will effectively * initialize the decoder, and will not produce any output bytes yet. */ r = lzma_code(&zip->zipx_lzma_stream, LZMA_RUN); if (r != LZMA_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "lzma stream initialization error"); return ARCHIVE_FATAL; } /* We've already consumed some bytes, so take this into account. */ __archive_read_consume(a, 9); zip->entry_bytes_remaining -= 9; zip->entry_compressed_bytes_read += 9; zip->decompress_init = 1; return (ARCHIVE_OK); } static int zip_read_data_zipx_xz(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct zip* zip = (struct zip *)(a->format->data); int ret; lzma_ret lz_ret; const void* compressed_buf; ssize_t bytes_avail, in_bytes, to_consume = 0; (void) offset; /* UNUSED */ /* Initialize decompressor if not yet initialized. */ if (!zip->decompress_init) { ret = zipx_xz_init(a, zip); if (ret != ARCHIVE_OK) return (ret); } compressed_buf = __archive_read_ahead(a, 1, &bytes_avail); if (bytes_avail < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated xz file body"); return (ARCHIVE_FATAL); } in_bytes = zipmin(zip->entry_bytes_remaining, bytes_avail); zip->zipx_lzma_stream.next_in = compressed_buf; zip->zipx_lzma_stream.avail_in = in_bytes; zip->zipx_lzma_stream.total_in = 0; zip->zipx_lzma_stream.next_out = zip->uncompressed_buffer; zip->zipx_lzma_stream.avail_out = zip->uncompressed_buffer_size; zip->zipx_lzma_stream.total_out = 0; /* Perform the decompression. */ lz_ret = lzma_code(&zip->zipx_lzma_stream, LZMA_RUN); switch(lz_ret) { case LZMA_DATA_ERROR: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "xz data error (error %d)", (int) lz_ret); return (ARCHIVE_FATAL); case LZMA_NO_CHECK: case LZMA_OK: break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "xz unknown error %d", (int) lz_ret); return (ARCHIVE_FATAL); case LZMA_STREAM_END: lzma_end(&zip->zipx_lzma_stream); zip->zipx_lzma_valid = 0; if((int64_t) zip->zipx_lzma_stream.total_in != zip->entry_bytes_remaining) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "xz premature end of stream"); return (ARCHIVE_FATAL); } zip->end_of_entry = 1; break; } to_consume = zip->zipx_lzma_stream.total_in; __archive_read_consume(a, to_consume); zip->entry_bytes_remaining -= to_consume; zip->entry_compressed_bytes_read += to_consume; zip->entry_uncompressed_bytes_read += zip->zipx_lzma_stream.total_out; *size = zip->zipx_lzma_stream.total_out; *buff = zip->uncompressed_buffer; ret = consume_optional_marker(a, zip); if (ret != ARCHIVE_OK) return (ret); return (ARCHIVE_OK); } static int zip_read_data_zipx_lzma_alone(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct zip* zip = (struct zip *)(a->format->data); int ret; lzma_ret lz_ret; const void* compressed_buf; ssize_t bytes_avail, in_bytes, to_consume; (void) offset; /* UNUSED */ /* Initialize decompressor if not yet initialized. */ if (!zip->decompress_init) { ret = zipx_lzma_alone_init(a, zip); if (ret != ARCHIVE_OK) return (ret); } /* Fetch more compressed data. The same note as in deflate handler applies * here as well: * * Note: '1' here is a performance optimization. Recall that the * decompression layer returns a count of available bytes; asking for more * than that forces the decompressor to combine reads by copying data. 
*/ compressed_buf = __archive_read_ahead(a, 1, &bytes_avail); if (bytes_avail < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated lzma file body"); return (ARCHIVE_FATAL); } /* Set decompressor parameters. */ in_bytes = zipmin(zip->entry_bytes_remaining, bytes_avail); zip->zipx_lzma_stream.next_in = compressed_buf; zip->zipx_lzma_stream.avail_in = in_bytes; zip->zipx_lzma_stream.total_in = 0; zip->zipx_lzma_stream.next_out = zip->uncompressed_buffer; zip->zipx_lzma_stream.avail_out = /* These lzma_alone streams lack end of stream marker, so let's make * sure the unpacker won't try to unpack more than it's supposed to. */ zipmin((int64_t) zip->uncompressed_buffer_size, zip->entry->uncompressed_size - zip->entry_uncompressed_bytes_read); zip->zipx_lzma_stream.total_out = 0; /* Perform the decompression. */ lz_ret = lzma_code(&zip->zipx_lzma_stream, LZMA_RUN); switch(lz_ret) { case LZMA_DATA_ERROR: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "lzma data error (error %d)", (int) lz_ret); return (ARCHIVE_FATAL); case LZMA_OK: break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "lzma unknown error %d", (int) lz_ret); return (ARCHIVE_FATAL); } to_consume = zip->zipx_lzma_stream.total_in; /* Update pointers. */ __archive_read_consume(a, to_consume); zip->entry_bytes_remaining -= to_consume; zip->entry_compressed_bytes_read += to_consume; zip->entry_uncompressed_bytes_read += zip->zipx_lzma_stream.total_out; if(zip->entry_bytes_remaining == 0) { zip->end_of_entry = 1; } /* Return values. */ *size = zip->zipx_lzma_stream.total_out; *buff = zip->uncompressed_buffer; /* Behave the same way as during deflate decompression. */ ret = consume_optional_marker(a, zip); if (ret != ARCHIVE_OK) return (ret); /* Free lzma decoder handle because we'll no longer need it. */ if(zip->end_of_entry) { lzma_end(&zip->zipx_lzma_stream); zip->zipx_lzma_valid = 0; } /* If we're here, then we're good! */ return (ARCHIVE_OK); } #endif /* HAVE_LZMA_H && HAVE_LIBLZMA */ static int zipx_ppmd8_init(struct archive_read *a, struct zip *zip) { const void* p; uint32_t val; uint32_t order; uint32_t mem; uint32_t restore_method; /* Remove previous decompression context if it exists. */ if(zip->ppmd8_valid) { __archive_ppmd8_functions.Ppmd8_Free(&zip->ppmd8); zip->ppmd8_valid = 0; } /* Create a new decompression context. */ __archive_ppmd8_functions.Ppmd8_Construct(&zip->ppmd8); zip->ppmd8_stream_failed = 0; /* Setup function pointers required by Ppmd8 decompressor. The * 'ppmd_read' function will feed new bytes to the decompressor, * and will increment the 'zip->zipx_ppmd_read_compressed' counter. */ zip->ppmd8.Stream.In = &zip->zipx_ppmd_stream; zip->zipx_ppmd_stream.a = a; zip->zipx_ppmd_stream.Read = &ppmd_read; /* Reset number of read bytes to 0. */ zip->zipx_ppmd_read_compressed = 0; /* Read Ppmd8 header (2 bytes). */ p = __archive_read_ahead(a, 2, NULL); if(!p) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated file data in PPMd8 stream"); return (ARCHIVE_FATAL); } __archive_read_consume(a, 2); /* Decode the stream's compression parameters. */ val = archive_le16dec(p); order = (val & 15) + 1; mem = ((val >> 4) & 0xff) + 1; restore_method = (val >> 12); if(order < 2 || restore_method > 2) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid parameter set in PPMd8 stream (order=%d, " "restore=%d)", order, restore_method); return (ARCHIVE_FAILED); } /* Allocate the memory needed to properly decompress the file. 
*/ if(!__archive_ppmd8_functions.Ppmd8_Alloc(&zip->ppmd8, mem << 20)) { archive_set_error(&a->archive, ENOMEM, "Unable to allocate memory for PPMd8 stream: %d bytes", mem << 20); return (ARCHIVE_FATAL); } /* Signal the cleanup function to release Ppmd8 context in the * cleanup phase. */ zip->ppmd8_valid = 1; /* Perform further Ppmd8 initialization. */ if(!__archive_ppmd8_functions.Ppmd8_RangeDec_Init(&zip->ppmd8)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "PPMd8 stream range decoder initialization error"); return (ARCHIVE_FATAL); } __archive_ppmd8_functions.Ppmd8_Init(&zip->ppmd8, order, restore_method); /* Allocate the buffer that will hold uncompressed data. */ free(zip->uncompressed_buffer); zip->uncompressed_buffer_size = 256 * 1024; zip->uncompressed_buffer = (uint8_t*) malloc(zip->uncompressed_buffer_size); if(zip->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for PPMd8 decompression"); return ARCHIVE_FATAL; } /* Ppmd8 initialization is done. */ zip->decompress_init = 1; /* We've already read 2 bytes in the output stream. Additionally, * Ppmd8 initialization code could read some data as well. So we * are advancing the stream by 2 bytes plus whatever number of * bytes Ppmd8 init function used. */ zip->entry_compressed_bytes_read += 2 + zip->zipx_ppmd_read_compressed; return ARCHIVE_OK; } static int zip_read_data_zipx_ppmd(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct zip* zip = (struct zip *)(a->format->data); int ret; size_t consumed_bytes = 0; ssize_t bytes_avail = 0; (void) offset; /* UNUSED */ /* If we're here for the first time, initialize Ppmd8 decompression * context first. */ if(!zip->decompress_init) { ret = zipx_ppmd8_init(a, zip); if(ret != ARCHIVE_OK) return ret; } /* Fetch for more data. We're reading 1 byte here, but libarchive should * prefetch more bytes. */ (void) __archive_read_ahead(a, 1, &bytes_avail); if(bytes_avail < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated PPMd8 file body"); return (ARCHIVE_FATAL); } /* This counter will be updated inside ppmd_read(), which at one * point will be called by Ppmd8_DecodeSymbol. */ zip->zipx_ppmd_read_compressed = 0; /* Decompression loop. */ do { int sym = __archive_ppmd8_functions.Ppmd8_DecodeSymbol(&zip->ppmd8); if(sym < 0) { zip->end_of_entry = 1; break; } /* This field is set by ppmd_read() when there was no more data * to be read. */ if(zip->ppmd8_stream_failed) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated PPMd8 file body"); return (ARCHIVE_FATAL); } zip->uncompressed_buffer[consumed_bytes] = (uint8_t) sym; ++consumed_bytes; } while(consumed_bytes < zip->uncompressed_buffer_size); /* Update pointers for libarchive. */ *buff = zip->uncompressed_buffer; *size = consumed_bytes; /* Update pointers so we can continue decompression in another call. */ zip->entry_bytes_remaining -= zip->zipx_ppmd_read_compressed; zip->entry_compressed_bytes_read += zip->zipx_ppmd_read_compressed; zip->entry_uncompressed_bytes_read += consumed_bytes; /* If we're at the end of stream, deinitialize Ppmd8 context. */ if(zip->end_of_entry) { __archive_ppmd8_functions.Ppmd8_Free(&zip->ppmd8); zip->ppmd8_valid = 0; } /* Seek for optional marker, same way as in each zip entry. 
*/ ret = consume_optional_marker(a, zip); if (ret != ARCHIVE_OK) return ret; return ARCHIVE_OK; } #ifdef HAVE_BZLIB_H static int zipx_bzip2_init(struct archive_read *a, struct zip *zip) { int r; /* Deallocate already existing BZ2 decompression context if it * exists. */ if(zip->bzstream_valid) { BZ2_bzDecompressEnd(&zip->bzstream); zip->bzstream_valid = 0; } /* Allocate a new BZ2 decompression context. */ memset(&zip->bzstream, 0, sizeof(bz_stream)); r = BZ2_bzDecompressInit(&zip->bzstream, 0, 1); if(r != BZ_OK) { archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC, "bzip2 initialization failed(%d)", r); return ARCHIVE_FAILED; } /* Mark the bzstream field to be released in cleanup phase. */ zip->bzstream_valid = 1; /* (Re)allocate the buffer that will contain decompressed bytes. */ free(zip->uncompressed_buffer); zip->uncompressed_buffer_size = 256 * 1024; zip->uncompressed_buffer = (uint8_t*) malloc(zip->uncompressed_buffer_size); if (zip->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for bzip2 decompression"); return ARCHIVE_FATAL; } /* Initialization done. */ zip->decompress_init = 1; return ARCHIVE_OK; } static int zip_read_data_zipx_bzip2(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct zip *zip = (struct zip *)(a->format->data); ssize_t bytes_avail = 0, in_bytes, to_consume; const void *compressed_buff; int r; uint64_t total_out; (void) offset; /* UNUSED */ /* Initialize decompression context if we're here for the first time. */ if(!zip->decompress_init) { r = zipx_bzip2_init(a, zip); if(r != ARCHIVE_OK) return r; } /* Fetch more compressed bytes. */ compressed_buff = __archive_read_ahead(a, 1, &bytes_avail); if(bytes_avail < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated bzip2 file body"); return (ARCHIVE_FATAL); } in_bytes = zipmin(zip->entry_bytes_remaining, bytes_avail); if(in_bytes < 1) { /* libbz2 doesn't complain when caller feeds avail_in == 0. It will * actually return success in this case, which is undesirable. This is * why we need to make this check manually. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated bzip2 file body"); return (ARCHIVE_FATAL); } /* Setup buffer boundaries. */ zip->bzstream.next_in = (char*)(uintptr_t) compressed_buff; zip->bzstream.avail_in = in_bytes; zip->bzstream.total_in_hi32 = 0; zip->bzstream.total_in_lo32 = 0; zip->bzstream.next_out = (char*) zip->uncompressed_buffer; zip->bzstream.avail_out = zip->uncompressed_buffer_size; zip->bzstream.total_out_hi32 = 0; zip->bzstream.total_out_lo32 = 0; /* Perform the decompression. */ r = BZ2_bzDecompress(&zip->bzstream); switch(r) { case BZ_STREAM_END: /* If we're at the end of the stream, deinitialize the * decompression context now. */ switch(BZ2_bzDecompressEnd(&zip->bzstream)) { case BZ_OK: break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to clean up bzip2 decompressor"); return ARCHIVE_FATAL; } zip->end_of_entry = 1; break; case BZ_OK: /* The decompressor has successfully decoded this chunk of * data, but more data is still in queue. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "bzip2 decompression failed"); return ARCHIVE_FATAL; } /* Update the pointers so decompressor can continue decoding. 
*/ to_consume = zip->bzstream.total_in_lo32; __archive_read_consume(a, to_consume); total_out = ((uint64_t) zip->bzstream.total_out_hi32 << 32) + zip->bzstream.total_out_lo32; zip->entry_bytes_remaining -= to_consume; zip->entry_compressed_bytes_read += to_consume; zip->entry_uncompressed_bytes_read += total_out; /* Give libarchive its due. */ *size = total_out; *buff = zip->uncompressed_buffer; /* Seek for optional marker, like in other entries. */ r = consume_optional_marker(a, zip); if(r != ARCHIVE_OK) return r; return ARCHIVE_OK; } #endif #ifdef HAVE_ZLIB_H static int zip_deflate_init(struct archive_read *a, struct zip *zip) { int r; /* If we haven't yet read any data, initialize the decompressor. */ if (!zip->decompress_init) { if (zip->stream_valid) r = inflateReset(&zip->stream); else r = inflateInit2(&zip->stream, -15 /* Don't check for zlib header */); if (r != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Can't initialize ZIP decompression."); return (ARCHIVE_FATAL); } /* Stream structure has been set up. */ zip->stream_valid = 1; /* We've initialized decompression for this stream. */ zip->decompress_init = 1; } return (ARCHIVE_OK); } static int zip_read_data_deflate(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct zip *zip; ssize_t bytes_avail; const void *compressed_buff, *sp; int r; (void)offset; /* UNUSED */ zip = (struct zip *)(a->format->data); /* If the buffer hasn't been allocated, allocate it now. */ if (zip->uncompressed_buffer == NULL) { zip->uncompressed_buffer_size = 256 * 1024; zip->uncompressed_buffer = (unsigned char *)malloc(zip->uncompressed_buffer_size); if (zip->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for ZIP decompression"); return (ARCHIVE_FATAL); } } r = zip_deflate_init(a, zip); if (r != ARCHIVE_OK) return (r); /* * Note: '1' here is a performance optimization. * Recall that the decompression layer returns a count of * available bytes; asking for more than that forces the * decompressor to combine reads by copying data. 
*/ compressed_buff = sp = __archive_read_ahead(a, 1, &bytes_avail); if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) && bytes_avail > zip->entry_bytes_remaining) { bytes_avail = (ssize_t)zip->entry_bytes_remaining; } if (bytes_avail < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file body"); return (ARCHIVE_FATAL); } if (zip->tctx_valid || zip->cctx_valid) { if (zip->decrypted_bytes_remaining < (size_t)bytes_avail) { size_t buff_remaining = (zip->decrypted_buffer + zip->decrypted_buffer_size) - (zip->decrypted_ptr + zip->decrypted_bytes_remaining); if (buff_remaining > (size_t)bytes_avail) buff_remaining = (size_t)bytes_avail; if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) && zip->entry_bytes_remaining > 0) { if ((int64_t)(zip->decrypted_bytes_remaining + buff_remaining) > zip->entry_bytes_remaining) { if (zip->entry_bytes_remaining < (int64_t)zip->decrypted_bytes_remaining) buff_remaining = 0; else buff_remaining = (size_t)zip->entry_bytes_remaining - zip->decrypted_bytes_remaining; } } if (buff_remaining > 0) { if (zip->tctx_valid) { trad_enc_decrypt_update(&zip->tctx, compressed_buff, buff_remaining, zip->decrypted_ptr + zip->decrypted_bytes_remaining, buff_remaining); } else { size_t dsize = buff_remaining; archive_decrypto_aes_ctr_update( &zip->cctx, compressed_buff, buff_remaining, zip->decrypted_ptr + zip->decrypted_bytes_remaining, &dsize); } zip->decrypted_bytes_remaining += buff_remaining; } } bytes_avail = zip->decrypted_bytes_remaining; compressed_buff = (const char *)zip->decrypted_ptr; } /* * A bug in zlib.h: stream.next_in should be marked 'const' * but isn't (the library never alters data through the * next_in pointer, only reads it). The result: this ugly * cast to remove 'const'. */ zip->stream.next_in = (Bytef *)(uintptr_t)(const void *)compressed_buff; zip->stream.avail_in = (uInt)bytes_avail; zip->stream.total_in = 0; zip->stream.next_out = zip->uncompressed_buffer; zip->stream.avail_out = (uInt)zip->uncompressed_buffer_size; zip->stream.total_out = 0; r = inflate(&zip->stream, 0); switch (r) { case Z_OK: break; case Z_STREAM_END: zip->end_of_entry = 1; break; case Z_MEM_ERROR: archive_set_error(&a->archive, ENOMEM, "Out of memory for ZIP decompression"); return (ARCHIVE_FATAL); default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "ZIP decompression failed (%d)", r); return (ARCHIVE_FATAL); } /* Consume as much as the compressor actually used. */ bytes_avail = zip->stream.total_in; if (zip->tctx_valid || zip->cctx_valid) { zip->decrypted_bytes_remaining -= bytes_avail; if (zip->decrypted_bytes_remaining == 0) zip->decrypted_ptr = zip->decrypted_buffer; else zip->decrypted_ptr += bytes_avail; } /* Calculate compressed data as much as we used.*/ if (zip->hctx_valid) archive_hmac_sha1_update(&zip->hctx, sp, bytes_avail); __archive_read_consume(a, bytes_avail); zip->entry_bytes_remaining -= bytes_avail; zip->entry_compressed_bytes_read += bytes_avail; *size = zip->stream.total_out; zip->entry_uncompressed_bytes_read += zip->stream.total_out; *buff = zip->uncompressed_buffer; if (zip->end_of_entry && zip->hctx_valid) { r = check_authentication_code(a, NULL); if (r != ARCHIVE_OK) return (r); } r = consume_optional_marker(a, zip); if (r != ARCHIVE_OK) return (r); return (ARCHIVE_OK); } #endif static int read_decryption_header(struct archive_read *a) { struct zip *zip = (struct zip *)(a->format->data); const char *p; unsigned int remaining_size; unsigned int ts; /* * Read an initialization vector data field. 
*/ p = __archive_read_ahead(a, 2, NULL); if (p == NULL) goto truncated; ts = zip->iv_size; zip->iv_size = archive_le16dec(p); __archive_read_consume(a, 2); if (ts < zip->iv_size) { free(zip->iv); zip->iv = NULL; } p = __archive_read_ahead(a, zip->iv_size, NULL); if (p == NULL) goto truncated; if (zip->iv == NULL) { zip->iv = malloc(zip->iv_size); if (zip->iv == NULL) goto nomem; } memcpy(zip->iv, p, zip->iv_size); __archive_read_consume(a, zip->iv_size); /* * Read a size of remaining decryption header field. */ p = __archive_read_ahead(a, 14, NULL); if (p == NULL) goto truncated; remaining_size = archive_le32dec(p); if (remaining_size < 16 || remaining_size > (1 << 18)) goto corrupted; /* Check if format version is supported. */ if (archive_le16dec(p+4) != 3) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unsupported encryption format version: %u", archive_le16dec(p+4)); return (ARCHIVE_FAILED); } /* * Read an encryption algorithm field. */ zip->alg_id = archive_le16dec(p+6); switch (zip->alg_id) { case 0x6601:/* DES */ case 0x6602:/* RC2 */ case 0x6603:/* 3DES 168 */ case 0x6609:/* 3DES 112 */ case 0x660E:/* AES 128 */ case 0x660F:/* AES 192 */ case 0x6610:/* AES 256 */ case 0x6702:/* RC2 (version >= 5.2) */ case 0x6720:/* Blowfish */ case 0x6721:/* Twofish */ case 0x6801:/* RC4 */ /* Supported encryption algorithm. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unknown encryption algorithm: %u", zip->alg_id); return (ARCHIVE_FAILED); } /* * Read a bit length field. */ zip->bit_len = archive_le16dec(p+8); /* * Read a flags field. */ zip->flags = archive_le16dec(p+10); switch (zip->flags & 0xf000) { case 0x0001: /* Password is required to decrypt. */ case 0x0002: /* Certificates only. */ case 0x0003: /* Password or certificate required to decrypt. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unknown encryption flag: %u", zip->flags); return (ARCHIVE_FAILED); } if ((zip->flags & 0xf000) == 0 || (zip->flags & 0xf000) == 0x4000) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unknown encryption flag: %u", zip->flags); return (ARCHIVE_FAILED); } /* * Read an encrypted random data field. */ ts = zip->erd_size; zip->erd_size = archive_le16dec(p+12); __archive_read_consume(a, 14); if ((zip->erd_size & 0xf) != 0 || (zip->erd_size + 16) > remaining_size || (zip->erd_size + 16) < zip->erd_size) goto corrupted; if (ts < zip->erd_size) { free(zip->erd); zip->erd = NULL; } p = __archive_read_ahead(a, zip->erd_size, NULL); if (p == NULL) goto truncated; if (zip->erd == NULL) { zip->erd = malloc(zip->erd_size); if (zip->erd == NULL) goto nomem; } memcpy(zip->erd, p, zip->erd_size); __archive_read_consume(a, zip->erd_size); /* * Read a reserved data field. */ p = __archive_read_ahead(a, 4, NULL); if (p == NULL) goto truncated; /* Reserved data size should be zero. */ if (archive_le32dec(p) != 0) goto corrupted; __archive_read_consume(a, 4); /* * Read a password validation data field. 
*/ p = __archive_read_ahead(a, 2, NULL); if (p == NULL) goto truncated; ts = zip->v_size; zip->v_size = archive_le16dec(p); __archive_read_consume(a, 2); if ((zip->v_size & 0x0f) != 0 || (zip->erd_size + zip->v_size + 16) > remaining_size || (zip->erd_size + zip->v_size + 16) < (zip->erd_size + zip->v_size)) goto corrupted; if (ts < zip->v_size) { free(zip->v_data); zip->v_data = NULL; } p = __archive_read_ahead(a, zip->v_size, NULL); if (p == NULL) goto truncated; if (zip->v_data == NULL) { zip->v_data = malloc(zip->v_size); if (zip->v_data == NULL) goto nomem; } memcpy(zip->v_data, p, zip->v_size); __archive_read_consume(a, zip->v_size); p = __archive_read_ahead(a, 4, NULL); if (p == NULL) goto truncated; zip->v_crc32 = archive_le32dec(p); __archive_read_consume(a, 4); /*return (ARCHIVE_OK); * This is not fully implemented yet.*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Encrypted file is unsupported"); return (ARCHIVE_FAILED); truncated: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); corrupted: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Corrupted ZIP file data"); return (ARCHIVE_FATAL); nomem: archive_set_error(&a->archive, ENOMEM, "No memory for ZIP decryption"); return (ARCHIVE_FATAL); } static int zip_alloc_decryption_buffer(struct archive_read *a) { struct zip *zip = (struct zip *)(a->format->data); size_t bs = 256 * 1024; if (zip->decrypted_buffer == NULL) { zip->decrypted_buffer_size = bs; zip->decrypted_buffer = malloc(bs); if (zip->decrypted_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for ZIP decryption"); return (ARCHIVE_FATAL); } } zip->decrypted_ptr = zip->decrypted_buffer; return (ARCHIVE_OK); } static int init_traditional_PKWARE_decryption(struct archive_read *a) { struct zip *zip = (struct zip *)(a->format->data); const void *p; int retry; int r; if (zip->tctx_valid) return (ARCHIVE_OK); /* Read the 12 bytes encryption header stored at the start of the data area. */ #define ENC_HEADER_SIZE 12 if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) && zip->entry_bytes_remaining < ENC_HEADER_SIZE) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated Zip encrypted body: only %jd bytes available", (intmax_t)zip->entry_bytes_remaining); return (ARCHIVE_FATAL); } p = __archive_read_ahead(a, ENC_HEADER_SIZE, NULL); if (p == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); } for (retry = 0;; retry++) { const char *passphrase; uint8_t crcchk; passphrase = __archive_read_next_passphrase(a); if (passphrase == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, (retry > 0)? "Incorrect passphrase": "Passphrase required for this entry"); return (ARCHIVE_FAILED); } /* * Initialize ctx for Traditional PKWARE Decryption. */ r = trad_enc_init(&zip->tctx, passphrase, strlen(passphrase), p, ENC_HEADER_SIZE, &crcchk); if (r == 0 && crcchk == zip->entry->decdat) break;/* The passphrase is OK. */ if (retry > 10000) { /* Avoid infinity loop. 
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Too many incorrect passphrases"); return (ARCHIVE_FAILED); } } __archive_read_consume(a, ENC_HEADER_SIZE); zip->tctx_valid = 1; if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END)) { zip->entry_bytes_remaining -= ENC_HEADER_SIZE; } /*zip->entry_uncompressed_bytes_read += ENC_HEADER_SIZE;*/ zip->entry_compressed_bytes_read += ENC_HEADER_SIZE; zip->decrypted_bytes_remaining = 0; return (zip_alloc_decryption_buffer(a)); #undef ENC_HEADER_SIZE } static int init_WinZip_AES_decryption(struct archive_read *a) { struct zip *zip = (struct zip *)(a->format->data); const void *p; const uint8_t *pv; size_t key_len, salt_len; uint8_t derived_key[MAX_DERIVED_KEY_BUF_SIZE]; int retry; int r; if (zip->cctx_valid || zip->hctx_valid) return (ARCHIVE_OK); switch (zip->entry->aes_extra.strength) { case 1: salt_len = 8; key_len = 16; break; case 2: salt_len = 12; key_len = 24; break; case 3: salt_len = 16; key_len = 32; break; default: goto corrupted; } p = __archive_read_ahead(a, salt_len + 2, NULL); if (p == NULL) goto truncated; for (retry = 0;; retry++) { const char *passphrase; passphrase = __archive_read_next_passphrase(a); if (passphrase == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, (retry > 0)? "Incorrect passphrase": "Passphrase required for this entry"); return (ARCHIVE_FAILED); } memset(derived_key, 0, sizeof(derived_key)); r = archive_pbkdf2_sha1(passphrase, strlen(passphrase), p, salt_len, 1000, derived_key, key_len * 2 + 2); if (r != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Decryption is unsupported due to lack of " "crypto library"); return (ARCHIVE_FAILED); } /* Check password verification value. */ pv = ((const uint8_t *)p) + salt_len; if (derived_key[key_len * 2] == pv[0] && derived_key[key_len * 2 + 1] == pv[1]) break;/* The passphrase is OK. */ if (retry > 10000) { /* Avoid infinity loop. 
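 *
 * For reference, the key material produced by the PBKDF2-SHA1 call above
 * (1000 iterations over the salt read from the stream) is 2 * key_len + 2
 * bytes laid out as:
 *
 *    [0, key_len)            AES-CTR key   (installed into zip->cctx)
 *    [key_len, 2 * key_len)  HMAC-SHA1 key (installed into zip->hctx)
 *    last 2 bytes            password verification value, compared with
 *                            the 2 bytes following the salt
 *
 * where strength 1/2/3 selects an 8/12/16-byte salt and a 16/24/32-byte
 * key (AES-128/192/256).
 *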
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Too many incorrect passphrases"); return (ARCHIVE_FAILED); } } r = archive_decrypto_aes_ctr_init(&zip->cctx, derived_key, key_len); if (r != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Decryption is unsupported due to lack of crypto library"); return (ARCHIVE_FAILED); } r = archive_hmac_sha1_init(&zip->hctx, derived_key + key_len, key_len); if (r != 0) { archive_decrypto_aes_ctr_release(&zip->cctx); archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to initialize HMAC-SHA1"); return (ARCHIVE_FAILED); } zip->cctx_valid = zip->hctx_valid = 1; __archive_read_consume(a, salt_len + 2); zip->entry_bytes_remaining -= salt_len + 2 + AUTH_CODE_SIZE; if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) && zip->entry_bytes_remaining < 0) goto corrupted; zip->entry_compressed_bytes_read += salt_len + 2 + AUTH_CODE_SIZE; zip->decrypted_bytes_remaining = 0; zip->entry->compression = zip->entry->aes_extra.compression; return (zip_alloc_decryption_buffer(a)); truncated: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); corrupted: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Corrupted ZIP file data"); return (ARCHIVE_FATAL); } static int archive_read_format_zip_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { int r; struct zip *zip = (struct zip *)(a->format->data); if (zip->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) { zip->has_encrypted_entries = 0; } *offset = zip->entry_uncompressed_bytes_read; *size = 0; *buff = NULL; /* If we hit end-of-entry last time, return ARCHIVE_EOF. */ if (zip->end_of_entry) return (ARCHIVE_EOF); /* Return EOF immediately if this is a non-regular file. */ if (AE_IFREG != (zip->entry->mode & AE_IFMT)) return (ARCHIVE_EOF); __archive_read_consume(a, zip->unconsumed); zip->unconsumed = 0; if (zip->init_decryption) { zip->has_encrypted_entries = 1; if (zip->entry->zip_flags & ZIP_STRONG_ENCRYPTED) r = read_decryption_header(a); else if (zip->entry->compression == WINZIP_AES_ENCRYPTION) r = init_WinZip_AES_decryption(a); else r = init_traditional_PKWARE_decryption(a); if (r != ARCHIVE_OK) return (r); zip->init_decryption = 0; } switch(zip->entry->compression) { case 0: /* No compression. */ r = zip_read_data_none(a, buff, size, offset); break; #ifdef HAVE_BZLIB_H case 12: /* ZIPx bzip2 compression. */ r = zip_read_data_zipx_bzip2(a, buff, size, offset); break; #endif #if HAVE_LZMA_H && HAVE_LIBLZMA case 14: /* ZIPx LZMA compression. */ r = zip_read_data_zipx_lzma_alone(a, buff, size, offset); break; case 95: /* ZIPx XZ compression. */ r = zip_read_data_zipx_xz(a, buff, size, offset); break; #endif /* PPMd support is built-in, so we don't need any #if guards. */ case 98: /* ZIPx PPMd compression. */ r = zip_read_data_zipx_ppmd(a, buff, size, offset); break; #ifdef HAVE_ZLIB_H case 8: /* Deflate compression. */ r = zip_read_data_deflate(a, buff, size, offset); break; #endif default: /* Unsupported compression. */ /* Return a warning. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unsupported ZIP compression method (%d: %s)", zip->entry->compression, compression_name(zip->entry->compression)); /* We can't decompress this entry, but we will * be able to skip() it and try the next entry. 
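 *
 * For reference, the method ids handled by the switch above are 0
 * (stored), 8 (deflate), 12 (bzip2), 14 (lzma), 95 (xz) and 98 (ppmd8);
 * WinZip AES entries reach this switch with their real method already
 * substituted from aes_extra.compression by init_WinZip_AES_decryption().
 *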
*/
        return (ARCHIVE_FAILED);
        break;
    }
    if (r != ARCHIVE_OK)
        return (r);
    /* Update checksum */
    if (*size)
        zip->entry_crc32 = zip->crc32func(zip->entry_crc32, *buff,
            (unsigned)*size);
    /* If we hit the end, swallow any end-of-data marker. */
    if (zip->end_of_entry) {
        /* Check file size, CRC against these values. */
        if (zip->entry->compressed_size !=
            zip->entry_compressed_bytes_read) {
            archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
                "ZIP compressed data is wrong size "
                "(read %jd, expected %jd)",
                (intmax_t)zip->entry_compressed_bytes_read,
                (intmax_t)zip->entry->compressed_size);
            return (ARCHIVE_WARN);
        }
        /* Size field only stores the lower 32 bits of the actual
         * size. */
        if ((zip->entry->uncompressed_size & UINT32_MAX)
            != (zip->entry_uncompressed_bytes_read & UINT32_MAX)) {
            archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
                "ZIP uncompressed data is wrong size "
                "(read %jd, expected %jd)",
                (intmax_t)zip->entry_uncompressed_bytes_read,
                (intmax_t)zip->entry->uncompressed_size);
            return (ARCHIVE_WARN);
        }
        /* Check computed CRC against header */
        if ((!zip->hctx_valid ||
              zip->entry->aes_extra.vendor != AES_VENDOR_AE_2) &&
            zip->entry->crc32 != zip->entry_crc32
            && !zip->ignore_crc32) {
            archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
                "ZIP bad CRC: 0x%lx should be 0x%lx",
                (unsigned long)zip->entry_crc32,
                (unsigned long)zip->entry->crc32);
            return (ARCHIVE_WARN);
        }
    }

    return (ARCHIVE_OK);
}

static int
archive_read_format_zip_cleanup(struct archive_read *a)
{
    struct zip *zip;
    struct zip_entry *zip_entry, *next_zip_entry;

    zip = (struct zip *)(a->format->data);

#ifdef HAVE_ZLIB_H
    if (zip->stream_valid)
        inflateEnd(&zip->stream);
#endif

#if HAVE_LZMA_H && HAVE_LIBLZMA
    if (zip->zipx_lzma_valid) {
        lzma_end(&zip->zipx_lzma_stream);
    }
#endif

#ifdef HAVE_BZLIB_H
    if (zip->bzstream_valid) {
        BZ2_bzDecompressEnd(&zip->bzstream);
    }
#endif

    free(zip->uncompressed_buffer);

    if (zip->ppmd8_valid)
        __archive_ppmd8_functions.Ppmd8_Free(&zip->ppmd8);

    if (zip->zip_entries) {
        zip_entry = zip->zip_entries;
        while (zip_entry != NULL) {
            next_zip_entry = zip_entry->next;
            archive_string_free(&zip_entry->rsrcname);
            free(zip_entry);
            zip_entry = next_zip_entry;
        }
    }
    free(zip->decrypted_buffer);
    if (zip->cctx_valid)
        archive_decrypto_aes_ctr_release(&zip->cctx);
    if (zip->hctx_valid)
        archive_hmac_sha1_cleanup(&zip->hctx);
    free(zip->iv);
    free(zip->erd);
    free(zip->v_data);
    archive_string_free(&zip->format_name);
    free(zip);
    (a->format->data) = NULL;
    return (ARCHIVE_OK);
}

static int
archive_read_format_zip_has_encrypted_entries(struct archive_read *_a)
{
    if (_a && _a->format) {
        struct zip * zip = (struct zip *)_a->format->data;
        if (zip) {
            return zip->has_encrypted_entries;
        }
    }
    return ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW;
}

static int
archive_read_format_zip_options(struct archive_read *a,
    const char *key, const char *val)
{
    struct zip *zip;
    int ret = ARCHIVE_FAILED;

    zip = (struct zip *)(a->format->data);
    if (strcmp(key, "compat-2x") == 0) {
        /* Handle filenames as libarchive 2.x */
        zip->init_default_conversion = (val != NULL) ? 1 : 0;
        return (ARCHIVE_OK);
    } else if (strcmp(key, "hdrcharset") == 0) {
        if (val == NULL || val[0] == 0)
            archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
                "zip: hdrcharset option needs a character-set name");
        else {
            zip->sconv = archive_string_conversion_from_charset(
                &a->archive, val, 0);
            if (zip->sconv != NULL) {
                if (strcmp(val, "UTF-8") == 0)
                    zip->sconv_utf8 = zip->sconv;
                ret = ARCHIVE_OK;
            } else
                ret = ARCHIVE_FATAL;
        }
        return (ret);
    } else if (strcmp(key, "ignorecrc32") == 0) {
        /* Mostly useful for testing.
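 *
 * A minimal usage sketch (assuming the caller already holds a struct
 * archive *a opened for reading):
 *
 *    archive_read_set_options(a, "zip:ignorecrc32");
 *
 * which routes through this handler and installs fake_crc32 so damaged
 * test archives can still be unpacked.
 *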
*/ if (val == NULL || val[0] == 0) { zip->crc32func = real_crc32; zip->ignore_crc32 = 0; } else { zip->crc32func = fake_crc32; zip->ignore_crc32 = 1; } return (ARCHIVE_OK); } else if (strcmp(key, "mac-ext") == 0) { zip->process_mac_extensions = (val != NULL && val[0] != 0); return (ARCHIVE_OK); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. It will generate * a suitable error if no one used this option. */ return (ARCHIVE_WARN); } int archive_read_support_format_zip(struct archive *a) { int r; r = archive_read_support_format_zip_streamable(a); if (r != ARCHIVE_OK) return r; return (archive_read_support_format_zip_seekable(a)); } /* ------------------------------------------------------------------------ */ /* * Streaming-mode support */ static int archive_read_support_format_zip_capabilities_streamable(struct archive_read * a) { (void)a; /* UNUSED */ return (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA | ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA); } static int archive_read_format_zip_streamable_bid(struct archive_read *a, int best_bid) { const char *p; (void)best_bid; /* UNUSED */ if ((p = __archive_read_ahead(a, 4, NULL)) == NULL) return (-1); /* * Bid of 29 here comes from: * + 16 bits for "PK", * + next 16-bit field has 6 options so contributes * about 16 - log_2(6) ~= 16 - 2.6 ~= 13 bits * * So we've effectively verified ~29 total bits of check data. */ if (p[0] == 'P' && p[1] == 'K') { if ((p[2] == '\001' && p[3] == '\002') || (p[2] == '\003' && p[3] == '\004') || (p[2] == '\005' && p[3] == '\006') || (p[2] == '\006' && p[3] == '\006') || (p[2] == '\007' && p[3] == '\010') || (p[2] == '0' && p[3] == '0')) return (29); } /* TODO: It's worth looking ahead a little bit for a valid * PK signature. In particular, that would make it possible * to read some UUEncoded SFX files or SFX files coming from * a network socket. */ return (0); } static int archive_read_format_zip_streamable_read_header(struct archive_read *a, struct archive_entry *entry) { struct zip *zip; a->archive.archive_format = ARCHIVE_FORMAT_ZIP; if (a->archive.archive_format_name == NULL) a->archive.archive_format_name = "ZIP"; zip = (struct zip *)(a->format->data); /* * It should be sufficient to call archive_read_next_header() for * a reader to determine if an entry is encrypted or not. If the * encryption of an entry is only detectable when calling * archive_read_data(), so be it. We'll do the same check there * as well. */ if (zip->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) zip->has_encrypted_entries = 0; /* Make sure we have a zip_entry structure to use. */ if (zip->zip_entries == NULL) { zip->zip_entries = malloc(sizeof(struct zip_entry)); if (zip->zip_entries == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return ARCHIVE_FATAL; } } zip->entry = zip->zip_entries; memset(zip->entry, 0, sizeof(struct zip_entry)); if (zip->cctx_valid) archive_decrypto_aes_ctr_release(&zip->cctx); if (zip->hctx_valid) archive_hmac_sha1_cleanup(&zip->hctx); zip->tctx_valid = zip->cctx_valid = zip->hctx_valid = 0; __archive_read_reset_passphrase(a); /* Search ahead for the next local file header. */ __archive_read_consume(a, zip->unconsumed); zip->unconsumed = 0; for (;;) { int64_t skipped = 0; const char *p, *end; ssize_t bytes; p = __archive_read_ahead(a, 4, &bytes); if (p == NULL) return (ARCHIVE_FATAL); end = p + bytes; while (p + 4 <= end) { if (p[0] == 'P' && p[1] == 'K') { if (p[2] == '\003' && p[3] == '\004') { /* Regular file entry. 
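 *
 * For reference, the signature pairs recognized by this scan and by the
 * bidder above are PK\001\002 (central directory entry), PK\003\004
 * (local file header), PK\005\006 (end of central directory), PK\006\006
 * (Zip64 end of central directory), PK\007\010 (data descriptor) and
 * "PK00" (a spanning marker sometimes prepended to single-segment
 * archives).
 *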
*/ __archive_read_consume(a, skipped); return zip_read_local_file_header(a, entry, zip); } /* * TODO: We cannot restore permissions * based only on the local file headers. * Consider scanning the central * directory and returning additional * entries for at least directories. * This would allow us to properly set * directory permissions. * * This won't help us fix symlinks * and may not help with regular file * permissions, either. <sigh> */ if (p[2] == '\001' && p[3] == '\002') { return (ARCHIVE_EOF); } /* End of central directory? Must be an * empty archive. */ if ((p[2] == '\005' && p[3] == '\006') || (p[2] == '\006' && p[3] == '\006')) return (ARCHIVE_EOF); } ++p; ++skipped; } __archive_read_consume(a, skipped); } } static int archive_read_format_zip_read_data_skip_streamable(struct archive_read *a) { struct zip *zip; int64_t bytes_skipped; zip = (struct zip *)(a->format->data); bytes_skipped = __archive_read_consume(a, zip->unconsumed); zip->unconsumed = 0; if (bytes_skipped < 0) return (ARCHIVE_FATAL); /* If we've already read to end of data, we're done. */ if (zip->end_of_entry) return (ARCHIVE_OK); /* So we know we're streaming... */ if (0 == (zip->entry->zip_flags & ZIP_LENGTH_AT_END) || zip->entry->compressed_size > 0) { /* We know the compressed length, so we can just skip. */ bytes_skipped = __archive_read_consume(a, zip->entry_bytes_remaining); if (bytes_skipped < 0) return (ARCHIVE_FATAL); return (ARCHIVE_OK); } if (zip->init_decryption) { int r; zip->has_encrypted_entries = 1; if (zip->entry->zip_flags & ZIP_STRONG_ENCRYPTED) r = read_decryption_header(a); else if (zip->entry->compression == WINZIP_AES_ENCRYPTION) r = init_WinZip_AES_decryption(a); else r = init_traditional_PKWARE_decryption(a); if (r != ARCHIVE_OK) return (r); zip->init_decryption = 0; } /* We're streaming and we don't know the length. */ /* If the body is compressed and we know the format, we can * find an exact end-of-entry by decompressing it. */ switch (zip->entry->compression) { #ifdef HAVE_ZLIB_H case 8: /* Deflate compression. */ while (!zip->end_of_entry) { int64_t offset = 0; const void *buff = NULL; size_t size = 0; int r; r = zip_read_data_deflate(a, &buff, &size, &offset); if (r != ARCHIVE_OK) return (r); } return ARCHIVE_OK; #endif default: /* Uncompressed or unknown. */ /* Scan for a PK\007\010 signature. */ for (;;) { const char *p, *buff; ssize_t bytes_avail; buff = __archive_read_ahead(a, 16, &bytes_avail); if (bytes_avail < 16) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file data"); return (ARCHIVE_FATAL); } p = buff; while (p <= buff + bytes_avail - 16) { if (p[3] == 'P') { p += 3; } else if (p[3] == 'K') { p += 2; } else if (p[3] == '\007') { p += 1; } else if (p[3] == '\010' && p[2] == '\007' && p[1] == 'K' && p[0] == 'P') { if (zip->entry->flags & LA_USED_ZIP64) __archive_read_consume(a, p - buff + 24); else __archive_read_consume(a, p - buff + 16); return ARCHIVE_OK; } else { p += 4; } } __archive_read_consume(a, p - buff); } } } int archive_read_support_format_zip_streamable(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct zip *zip; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_zip"); zip = (struct zip *)calloc(1, sizeof(*zip)); if (zip == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate zip data"); return (ARCHIVE_FATAL); } /* Streamable reader doesn't support mac extensions. 
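 *
 * A minimal usage sketch for registering only this streaming reader (the
 * file name is illustrative):
 *
 *    struct archive *a = archive_read_new();
 *    archive_read_support_format_zip_streamable(a);
 *    archive_read_support_filter_all(a);
 *    if (archive_read_open_filename(a, "archive.zip", 10240) == ARCHIVE_OK) {
 *            struct archive_entry *ae;
 *            while (archive_read_next_header(a, &ae) == ARCHIVE_OK)
 *                    archive_read_data_skip(a);
 *    }
 *    archive_read_free(a);
 *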
*/ zip->process_mac_extensions = 0; /* * Until enough data has been read, we cannot tell about * any encrypted entries yet. */ zip->has_encrypted_entries = ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW; zip->crc32func = real_crc32; r = __archive_read_register_format(a, zip, "zip", archive_read_format_zip_streamable_bid, archive_read_format_zip_options, archive_read_format_zip_streamable_read_header, archive_read_format_zip_read_data, archive_read_format_zip_read_data_skip_streamable, NULL, archive_read_format_zip_cleanup, archive_read_support_format_zip_capabilities_streamable, archive_read_format_zip_has_encrypted_entries); if (r != ARCHIVE_OK) free(zip); return (ARCHIVE_OK); } /* ------------------------------------------------------------------------ */ /* * Seeking-mode support */ static int archive_read_support_format_zip_capabilities_seekable(struct archive_read * a) { (void)a; /* UNUSED */ return (ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA | ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA); } /* * TODO: This is a performance sink because it forces the read core to * drop buffered data from the start of file, which will then have to * be re-read again if this bidder loses. * * We workaround this a little by passing in the best bid so far so * that later bidders can do nothing if they know they'll never * outbid. But we can certainly do better... */ static int read_eocd(struct zip *zip, const char *p, int64_t current_offset) { /* Sanity-check the EOCD we've found. */ /* This must be the first volume. */ if (archive_le16dec(p + 4) != 0) return 0; /* Central directory must be on this volume. */ if (archive_le16dec(p + 4) != archive_le16dec(p + 6)) return 0; /* All central directory entries must be on this volume. */ if (archive_le16dec(p + 10) != archive_le16dec(p + 8)) return 0; /* Central directory can't extend beyond start of EOCD record. */ if (archive_le32dec(p + 16) + archive_le32dec(p + 12) > current_offset) return 0; /* Save the central directory location for later use. */ zip->central_directory_offset = archive_le32dec(p + 16); /* This is just a tiny bit higher than the maximum returned by the streaming Zip bidder. This ensures that the more accurate seeking Zip parser wins whenever seek is available. */ return 32; } /* * Examine Zip64 EOCD locator: If it's valid, store the information * from it. */ static int read_zip64_eocd(struct archive_read *a, struct zip *zip, const char *p) { int64_t eocd64_offset; int64_t eocd64_size; /* Sanity-check the locator record. */ /* Central dir must be on first volume. */ if (archive_le32dec(p + 4) != 0) return 0; /* Must be only a single volume. */ if (archive_le32dec(p + 16) != 1) return 0; /* Find the Zip64 EOCD record. */ eocd64_offset = archive_le64dec(p + 8); if (__archive_read_seek(a, eocd64_offset, SEEK_SET) < 0) return 0; if ((p = __archive_read_ahead(a, 56, NULL)) == NULL) return 0; /* Make sure we can read all of it. */ eocd64_size = archive_le64dec(p + 4) + 12; if (eocd64_size < 56 || eocd64_size > 16384) return 0; if ((p = __archive_read_ahead(a, (size_t)eocd64_size, NULL)) == NULL) return 0; /* Sanity-check the EOCD64 */ if (archive_le32dec(p + 16) != 0) /* Must be disk #0 */ return 0; if (archive_le32dec(p + 20) != 0) /* CD must be on disk #0 */ return 0; /* CD can't be split. */ if (archive_le64dec(p + 24) != archive_le64dec(p + 32)) return 0; /* Save the central directory offset for later use. 
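 *
 * For reference, the fields consulted in these records, relative to their
 * signatures: in the EOCD (PK\005\006) record, disk number (+4), disk
 * holding the central directory (+6), entries on this disk (+8), total
 * entries (+10), central directory size (+12) and offset (+16); in the
 * Zip64 locator (PK\006\007), disk holding the EOCD64 (+4), EOCD64 offset
 * (+8) and total disk count (+16); in the EOCD64 (PK\006\006) record
 * itself, record size (+4), disk numbers (+16, +20), entry counts
 * (+24, +32) and the 64-bit central directory offset (+48) read below.
 *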
*/ zip->central_directory_offset = archive_le64dec(p + 48); return 32; } static int archive_read_format_zip_seekable_bid(struct archive_read *a, int best_bid) { struct zip *zip = (struct zip *)a->format->data; int64_t file_size, current_offset; const char *p; int i, tail; /* If someone has already bid more than 32, then avoid trashing the look-ahead buffers with a seek. */ if (best_bid > 32) return (-1); file_size = __archive_read_seek(a, 0, SEEK_END); if (file_size <= 0) return 0; /* Search last 16k of file for end-of-central-directory * record (which starts with PK\005\006) */ tail = (int)zipmin(1024 * 16, file_size); current_offset = __archive_read_seek(a, -tail, SEEK_END); if (current_offset < 0) return 0; if ((p = __archive_read_ahead(a, (size_t)tail, NULL)) == NULL) return 0; /* Boyer-Moore search backwards from the end, since we want * to match the last EOCD in the file (there can be more than * one if there is an uncompressed Zip archive as a member * within this Zip archive). */ for (i = tail - 22; i > 0;) { switch (p[i]) { case 'P': if (memcmp(p + i, "PK\005\006", 4) == 0) { int ret = read_eocd(zip, p + i, current_offset + i); /* Zip64 EOCD locator precedes * regular EOCD if present. */ if (i >= 20 && memcmp(p + i - 20, "PK\006\007", 4) == 0) { int ret_zip64 = read_zip64_eocd(a, zip, p + i - 20); if (ret_zip64 > ret) ret = ret_zip64; } return (ret); } i -= 4; break; case 'K': i -= 1; break; case 005: i -= 2; break; case 006: i -= 3; break; default: i -= 4; break; } } return 0; } /* The red-black trees are only used in seeking mode to manage * the in-memory copy of the central directory. */ static int cmp_node(const struct archive_rb_node *n1, const struct archive_rb_node *n2) { const struct zip_entry *e1 = (const struct zip_entry *)n1; const struct zip_entry *e2 = (const struct zip_entry *)n2; if (e1->local_header_offset > e2->local_header_offset) return -1; if (e1->local_header_offset < e2->local_header_offset) return 1; return 0; } static int cmp_key(const struct archive_rb_node *n, const void *key) { /* This function won't be called */ (void)n; /* UNUSED */ (void)key; /* UNUSED */ return 1; } static const struct archive_rb_tree_ops rb_ops = { &cmp_node, &cmp_key }; static int rsrc_cmp_node(const struct archive_rb_node *n1, const struct archive_rb_node *n2) { const struct zip_entry *e1 = (const struct zip_entry *)n1; const struct zip_entry *e2 = (const struct zip_entry *)n2; return (strcmp(e2->rsrcname.s, e1->rsrcname.s)); } static int rsrc_cmp_key(const struct archive_rb_node *n, const void *key) { const struct zip_entry *e = (const struct zip_entry *)n; return (strcmp((const char *)key, e->rsrcname.s)); } static const struct archive_rb_tree_ops rb_rsrc_ops = { &rsrc_cmp_node, &rsrc_cmp_key }; static const char * rsrc_basename(const char *name, size_t name_length) { const char *s, *r; r = s = name; for (;;) { s = memchr(s, '/', name_length - (s - name)); if (s == NULL) break; r = ++s; } return (r); } static void expose_parent_dirs(struct zip *zip, const char *name, size_t name_length) { struct archive_string str; struct zip_entry *dir; char *s; archive_string_init(&str); archive_strncpy(&str, name, name_length); for (;;) { s = strrchr(str.s, '/'); if (s == NULL) break; *s = '\0'; /* Transfer the parent directory from zip->tree_rsrc RB * tree to zip->tree RB tree to expose. 
*/ dir = (struct zip_entry *) __archive_rb_tree_find_node(&zip->tree_rsrc, str.s); if (dir == NULL) break; __archive_rb_tree_remove_node(&zip->tree_rsrc, &dir->node); archive_string_free(&dir->rsrcname); __archive_rb_tree_insert_node(&zip->tree, &dir->node); } archive_string_free(&str); } static int slurp_central_directory(struct archive_read *a, struct zip *zip) { ssize_t i; unsigned found; int64_t correction; ssize_t bytes_avail; const char *p; /* * Find the start of the central directory. The end-of-CD * record has our starting point, but there are lots of * Zip archives which have had other data prepended to the * file, which makes the recorded offsets all too small. * So we search forward from the specified offset until we * find the real start of the central directory. Then we * know the correction we need to apply to account for leading * padding. */ if (__archive_read_seek(a, zip->central_directory_offset, SEEK_SET) < 0) return ARCHIVE_FATAL; found = 0; while (!found) { if ((p = __archive_read_ahead(a, 20, &bytes_avail)) == NULL) return ARCHIVE_FATAL; for (found = 0, i = 0; !found && i < bytes_avail - 4;) { switch (p[i + 3]) { case 'P': i += 3; break; case 'K': i += 2; break; case 001: i += 1; break; case 002: if (memcmp(p + i, "PK\001\002", 4) == 0) { p += i; found = 1; } else i += 4; break; case 005: i += 1; break; case 006: if (memcmp(p + i, "PK\005\006", 4) == 0) { p += i; found = 1; } else if (memcmp(p + i, "PK\006\006", 4) == 0) { p += i; found = 1; } else i += 1; break; default: i += 4; break; } } __archive_read_consume(a, i); } correction = archive_filter_bytes(&a->archive, 0) - zip->central_directory_offset; __archive_rb_tree_init(&zip->tree, &rb_ops); __archive_rb_tree_init(&zip->tree_rsrc, &rb_rsrc_ops); zip->central_directory_entries_total = 0; while (1) { struct zip_entry *zip_entry; size_t filename_length, extra_length, comment_length; uint32_t external_attributes; const char *name, *r; if ((p = __archive_read_ahead(a, 4, NULL)) == NULL) return ARCHIVE_FATAL; if (memcmp(p, "PK\006\006", 4) == 0 || memcmp(p, "PK\005\006", 4) == 0) { break; } else if (memcmp(p, "PK\001\002", 4) != 0) { archive_set_error(&a->archive, -1, "Invalid central directory signature"); return ARCHIVE_FATAL; } if ((p = __archive_read_ahead(a, 46, NULL)) == NULL) return ARCHIVE_FATAL; zip_entry = calloc(1, sizeof(struct zip_entry)); if (zip_entry == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate zip entry"); return ARCHIVE_FATAL; } zip_entry->next = zip->zip_entries; zip_entry->flags |= LA_FROM_CENTRAL_DIRECTORY; zip->zip_entries = zip_entry; zip->central_directory_entries_total++; /* version = p[4]; */ zip_entry->system = p[5]; /* version_required = archive_le16dec(p + 6); */ zip_entry->zip_flags = archive_le16dec(p + 8); if (zip_entry->zip_flags & (ZIP_ENCRYPTED | ZIP_STRONG_ENCRYPTED)){ zip->has_encrypted_entries = 1; } zip_entry->compression = (char)archive_le16dec(p + 10); zip_entry->mtime = zip_time(p + 12); zip_entry->crc32 = archive_le32dec(p + 16); if (zip_entry->zip_flags & ZIP_LENGTH_AT_END) zip_entry->decdat = p[13]; else zip_entry->decdat = p[19]; zip_entry->compressed_size = archive_le32dec(p + 20); zip_entry->uncompressed_size = archive_le32dec(p + 24); filename_length = archive_le16dec(p + 28); extra_length = archive_le16dec(p + 30); comment_length = archive_le16dec(p + 32); /* disk_start = archive_le16dec(p + 34); */ /* Better be zero. 
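 *
 * For reference, the fixed 46-byte part of the PK\001\002 record being
 * parsed here holds: version made by (+4), version needed (+6), flags
 * (+8), compression method (+10), dos time/date (+12), crc32 (+16),
 * compressed size (+20), uncompressed size (+24), file name length (+28),
 * extra field length (+30), comment length (+32), disk start (+34),
 * internal attributes (+36), external attributes (+38) and local header
 * offset (+42), followed by the variable-length name, extra and comment.
 *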
*/ /* internal_attributes = archive_le16dec(p + 36); */ /* text bit */ external_attributes = archive_le32dec(p + 38); zip_entry->local_header_offset = archive_le32dec(p + 42) + correction; /* If we can't guess the mode, leave it zero here; when we read the local file header we might get more information. */ if (zip_entry->system == 3) { zip_entry->mode = external_attributes >> 16; } else if (zip_entry->system == 0) { // Interpret MSDOS directory bit if (0x10 == (external_attributes & 0x10)) { zip_entry->mode = AE_IFDIR | 0775; } else { zip_entry->mode = AE_IFREG | 0664; } if (0x01 == (external_attributes & 0x01)) { // Read-only bit; strip write permissions zip_entry->mode &= 0555; } } else { zip_entry->mode = 0; } /* We're done with the regular data; get the filename and * extra data. */ __archive_read_consume(a, 46); p = __archive_read_ahead(a, filename_length + extra_length, NULL); if (p == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); return ARCHIVE_FATAL; } if (ARCHIVE_OK != process_extra(a, p + filename_length, extra_length, zip_entry)) { return ARCHIVE_FATAL; } /* * Mac resource fork files are stored under the * "__MACOSX/" directory, so we should check if * it is. */ if (!zip->process_mac_extensions) { /* Treat every entry as a regular entry. */ __archive_rb_tree_insert_node(&zip->tree, &zip_entry->node); } else { name = p; r = rsrc_basename(name, filename_length); if (filename_length >= 9 && strncmp("__MACOSX/", name, 9) == 0) { /* If this file is not a resource fork nor * a directory. We should treat it as a non * resource fork file to expose it. */ if (name[filename_length-1] != '/' && (r - name < 3 || r[0] != '.' || r[1] != '_')) { __archive_rb_tree_insert_node( &zip->tree, &zip_entry->node); /* Expose its parent directories. */ expose_parent_dirs(zip, name, filename_length); } else { /* This file is a resource fork file or * a directory. */ archive_strncpy(&(zip_entry->rsrcname), name, filename_length); __archive_rb_tree_insert_node( &zip->tree_rsrc, &zip_entry->node); } } else { /* Generate resource fork name to find its * resource file at zip->tree_rsrc. */ archive_strcpy(&(zip_entry->rsrcname), "__MACOSX/"); archive_strncat(&(zip_entry->rsrcname), name, r - name); archive_strcat(&(zip_entry->rsrcname), "._"); archive_strncat(&(zip_entry->rsrcname), name + (r - name), filename_length - (r - name)); /* Register an entry to RB tree to sort it by * file offset. */ __archive_rb_tree_insert_node(&zip->tree, &zip_entry->node); } } /* Skip the comment too ... 
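 *
 * As an example of the attribute mapping above: a Unix-made entry
 * (system 3) with external_attributes 0x81a40000 yields mode
 * 0x81a40000 >> 16 == 0x81a4 == 0100644, i.e. a regular file with
 * rw-r--r-- permissions, while an MSDOS-made entry (system 0) with the
 * 0x10 directory bit set becomes AE_IFDIR | 0775, and its 0x01 read-only
 * bit strips the write bits via the & 0555 mask.
 *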
*/ __archive_read_consume(a, filename_length + extra_length + comment_length); } return ARCHIVE_OK; } static ssize_t zip_get_local_file_header_size(struct archive_read *a, size_t extra) { const char *p; ssize_t filename_length, extra_length; if ((p = __archive_read_ahead(a, extra + 30, NULL)) == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); return (ARCHIVE_WARN); } p += extra; if (memcmp(p, "PK\003\004", 4) != 0) { archive_set_error(&a->archive, -1, "Damaged Zip archive"); return ARCHIVE_WARN; } filename_length = archive_le16dec(p + 26); extra_length = archive_le16dec(p + 28); return (30 + filename_length + extra_length); } static int zip_read_mac_metadata(struct archive_read *a, struct archive_entry *entry, struct zip_entry *rsrc) { struct zip *zip = (struct zip *)a->format->data; unsigned char *metadata, *mp; int64_t offset = archive_filter_bytes(&a->archive, 0); size_t remaining_bytes, metadata_bytes; ssize_t hsize; int ret = ARCHIVE_OK, eof; switch(rsrc->compression) { case 0: /* No compression. */ if (rsrc->uncompressed_size != rsrc->compressed_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed OS X metadata entry: inconsistent size"); return (ARCHIVE_FATAL); } #ifdef HAVE_ZLIB_H case 8: /* Deflate compression. */ #endif break; default: /* Unsupported compression. */ /* Return a warning. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unsupported ZIP compression method (%s)", compression_name(rsrc->compression)); /* We can't decompress this entry, but we will * be able to skip() it and try the next entry. */ return (ARCHIVE_WARN); } if (rsrc->uncompressed_size > (4 * 1024 * 1024)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Mac metadata is too large: %jd > 4M bytes", (intmax_t)rsrc->uncompressed_size); return (ARCHIVE_WARN); } if (rsrc->compressed_size > (4 * 1024 * 1024)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Mac metadata is too large: %jd > 4M bytes", (intmax_t)rsrc->compressed_size); return (ARCHIVE_WARN); } metadata = malloc((size_t)rsrc->uncompressed_size); if (metadata == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Mac metadata"); return (ARCHIVE_FATAL); } if (offset < rsrc->local_header_offset) __archive_read_consume(a, rsrc->local_header_offset - offset); else if (offset != rsrc->local_header_offset) { __archive_read_seek(a, rsrc->local_header_offset, SEEK_SET); } hsize = zip_get_local_file_header_size(a, 0); __archive_read_consume(a, hsize); remaining_bytes = (size_t)rsrc->compressed_size; metadata_bytes = (size_t)rsrc->uncompressed_size; mp = metadata; eof = 0; while (!eof && remaining_bytes) { const unsigned char *p; ssize_t bytes_avail; size_t bytes_used; p = __archive_read_ahead(a, 1, &bytes_avail); if (p == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated ZIP file header"); ret = ARCHIVE_WARN; goto exit_mac_metadata; } if ((size_t)bytes_avail > remaining_bytes) bytes_avail = remaining_bytes; switch(rsrc->compression) { case 0: /* No compression. */ if ((size_t)bytes_avail > metadata_bytes) bytes_avail = metadata_bytes; memcpy(mp, p, bytes_avail); bytes_used = (size_t)bytes_avail; metadata_bytes -= bytes_used; mp += bytes_used; if (metadata_bytes == 0) eof = 1; break; #ifdef HAVE_ZLIB_H case 8: /* Deflate compression. 
*/ { int r; ret = zip_deflate_init(a, zip); if (ret != ARCHIVE_OK) goto exit_mac_metadata; zip->stream.next_in = (Bytef *)(uintptr_t)(const void *)p; zip->stream.avail_in = (uInt)bytes_avail; zip->stream.total_in = 0; zip->stream.next_out = mp; zip->stream.avail_out = (uInt)metadata_bytes; zip->stream.total_out = 0; r = inflate(&zip->stream, 0); switch (r) { case Z_OK: break; case Z_STREAM_END: eof = 1; break; case Z_MEM_ERROR: archive_set_error(&a->archive, ENOMEM, "Out of memory for ZIP decompression"); ret = ARCHIVE_FATAL; goto exit_mac_metadata; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "ZIP decompression failed (%d)", r); ret = ARCHIVE_FATAL; goto exit_mac_metadata; } bytes_used = zip->stream.total_in; metadata_bytes -= zip->stream.total_out; mp += zip->stream.total_out; break; } #endif default: bytes_used = 0; break; } __archive_read_consume(a, bytes_used); remaining_bytes -= bytes_used; } archive_entry_copy_mac_metadata(entry, metadata, (size_t)rsrc->uncompressed_size - metadata_bytes); exit_mac_metadata: __archive_read_seek(a, offset, SEEK_SET); zip->decompress_init = 0; free(metadata); return (ret); } static int archive_read_format_zip_seekable_read_header(struct archive_read *a, struct archive_entry *entry) { struct zip *zip = (struct zip *)a->format->data; struct zip_entry *rsrc; int64_t offset; int r, ret = ARCHIVE_OK; /* * It should be sufficient to call archive_read_next_header() for * a reader to determine if an entry is encrypted or not. If the * encryption of an entry is only detectable when calling * archive_read_data(), so be it. We'll do the same check there * as well. */ if (zip->has_encrypted_entries == ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW) zip->has_encrypted_entries = 0; a->archive.archive_format = ARCHIVE_FORMAT_ZIP; if (a->archive.archive_format_name == NULL) a->archive.archive_format_name = "ZIP"; if (zip->zip_entries == NULL) { r = slurp_central_directory(a, zip); if (r != ARCHIVE_OK) return r; /* Get first entry whose local header offset is lower than * other entries in the archive file. */ zip->entry = (struct zip_entry *)ARCHIVE_RB_TREE_MIN(&zip->tree); } else if (zip->entry != NULL) { /* Get next entry in local header offset order. */ zip->entry = (struct zip_entry *)__archive_rb_tree_iterate( &zip->tree, &zip->entry->node, ARCHIVE_RB_DIR_RIGHT); } if (zip->entry == NULL) return ARCHIVE_EOF; if (zip->entry->rsrcname.s) rsrc = (struct zip_entry *)__archive_rb_tree_find_node( &zip->tree_rsrc, zip->entry->rsrcname.s); else rsrc = NULL; if (zip->cctx_valid) archive_decrypto_aes_ctr_release(&zip->cctx); if (zip->hctx_valid) archive_hmac_sha1_cleanup(&zip->hctx); zip->tctx_valid = zip->cctx_valid = zip->hctx_valid = 0; __archive_read_reset_passphrase(a); /* File entries are sorted by the header offset, we should mostly * use __archive_read_consume to advance a read point to avoid redundant * data reading. */ offset = archive_filter_bytes(&a->archive, 0); if (offset < zip->entry->local_header_offset) __archive_read_consume(a, zip->entry->local_header_offset - offset); else if (offset != zip->entry->local_header_offset) { __archive_read_seek(a, zip->entry->local_header_offset, SEEK_SET); } zip->unconsumed = 0; r = zip_read_local_file_header(a, entry, zip); if (r != ARCHIVE_OK) return r; if (rsrc) { int ret2 = zip_read_mac_metadata(a, entry, rsrc); if (ret2 < ret) ret = ret2; } return (ret); } /* * We're going to seek for the next header anyway, so we don't * need to bother doing anything here. 
*/ static int archive_read_format_zip_read_data_skip_seekable(struct archive_read *a) { struct zip *zip; zip = (struct zip *)(a->format->data); zip->unconsumed = 0; return (ARCHIVE_OK); } int archive_read_support_format_zip_seekable(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct zip *zip; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_zip_seekable"); zip = (struct zip *)calloc(1, sizeof(*zip)); if (zip == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate zip data"); return (ARCHIVE_FATAL); } #ifdef HAVE_COPYFILE_H /* Set this by default on Mac OS. */ zip->process_mac_extensions = 1; #endif /* * Until enough data has been read, we cannot tell about * any encrypted entries yet. */ zip->has_encrypted_entries = ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW; zip->crc32func = real_crc32; r = __archive_read_register_format(a, zip, "zip", archive_read_format_zip_seekable_bid, archive_read_format_zip_options, archive_read_format_zip_seekable_read_header, archive_read_format_zip_read_data, archive_read_format_zip_read_data_skip_seekable, NULL, archive_read_format_zip_cleanup, archive_read_support_format_zip_capabilities_seekable, archive_read_format_zip_has_encrypted_entries); if (r != ARCHIVE_OK) free(zip); return (ARCHIVE_OK); } /*# vim:set noet:*/
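/*
 * A minimal illustrative sketch, not taken from libarchive, of the idea
 * slurp_central_directory() relies on above: because data prepended to a
 * Zip archive makes every offset recorded in the end-of-central-directory
 * record too small, the reader scans forward for the "PK\001\002"
 * central-directory signature instead of trusting the stored offset.  The
 * function name and the plain byte-by-byte memcmp() loop here are
 * assumptions made only for this sketch; the real code above skips several
 * bytes at a time by switching on p[i + 3] and reads its data through
 * __archive_read_ahead().
 */
#include <stddef.h>
#include <string.h>

/* Offset of the first central-directory file header in buf, or -1. */
static long
find_first_central_header(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 4 <= len; i++) {
		/* "PK\001\002" marks a central-directory file header. */
		if (memcmp(buf + i, "PK\001\002", 4) == 0)
			return (long)i;
	}
	return -1;	/* No signature found in the buffer. */
}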
./CrossVul/dataset_final_sorted/CWE-399/c/bad_814_0
crossvul-cpp_data_good_2147_0
/* * Copyright (c) Ian F. Darwin 1986-1995. * Software written by Ian F. Darwin and others; * maintained 1995-present by Christos Zoulas and others. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * softmagic - interpret variable magic from MAGIC */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: softmagic.c,v 1.188 2014/05/14 23:15:42 christos Exp $") #endif /* lint */ #include "magic.h" #include <assert.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <time.h> #if defined(HAVE_LOCALE_H) #include <locale.h> #endif private int match(struct magic_set *, struct magic *, uint32_t, const unsigned char *, size_t, size_t, int, int, int, int, int *, int *, int *); private int mget(struct magic_set *, const unsigned char *, struct magic *, size_t, size_t, unsigned int, int, int, int, int, int *, int *, int *); private int magiccheck(struct magic_set *, struct magic *); private int32_t mprint(struct magic_set *, struct magic *); private int32_t moffset(struct magic_set *, struct magic *); private void mdebug(uint32_t, const char *, size_t); private int mcopy(struct magic_set *, union VALUETYPE *, int, int, const unsigned char *, uint32_t, size_t, size_t); private int mconvert(struct magic_set *, struct magic *, int); private int print_sep(struct magic_set *, int); private int handle_annotation(struct magic_set *, struct magic *); private void cvt_8(union VALUETYPE *, const struct magic *); private void cvt_16(union VALUETYPE *, const struct magic *); private void cvt_32(union VALUETYPE *, const struct magic *); private void cvt_64(union VALUETYPE *, const struct magic *); #define OFFSET_OOB(n, o, i) ((n) < (o) || (i) > ((n) - (o))) /* * softmagic - lookup one file in parsed, in-memory copy of database * Passed the name and FILE * of one file to be typed. 
*/ /*ARGSUSED1*/ /* nbytes passed for regularity, maybe need later */ protected int file_softmagic(struct magic_set *ms, const unsigned char *buf, size_t nbytes, size_t level, int mode, int text) { struct mlist *ml; int rv, printed_something = 0, need_separator = 0; for (ml = ms->mlist[0]->next; ml != ms->mlist[0]; ml = ml->next) if ((rv = match(ms, ml->magic, ml->nmagic, buf, nbytes, 0, mode, text, 0, level, &printed_something, &need_separator, NULL)) != 0) return rv; return 0; } #define FILE_FMTDEBUG #ifdef FILE_FMTDEBUG #define F(a, b, c) file_fmtcheck((a), (b), (c), __FILE__, __LINE__) private const char * __attribute__((__format_arg__(3))) file_fmtcheck(struct magic_set *ms, const struct magic *m, const char *def, const char *file, size_t line) { const char *ptr = fmtcheck(m->desc, def); if (ptr == def) file_magerror(ms, "%s, %zu: format `%s' does not match with `%s'", file, line, m->desc, def); return ptr; } #else #define F(a, b, c) fmtcheck((b)->desc, (c)) #endif /* * Go through the whole list, stopping if you find a match. Process all * the continuations of that match before returning. * * We support multi-level continuations: * * At any time when processing a successful top-level match, there is a * current continuation level; it represents the level of the last * successfully matched continuation. * * Continuations above that level are skipped as, if we see one, it * means that the continuation that controls them - i.e, the * lower-level continuation preceding them - failed to match. * * Continuations below that level are processed as, if we see one, * it means we've finished processing or skipping higher-level * continuations under the control of a successful or unsuccessful * lower-level continuation, and are now seeing the next lower-level * continuation and should process it. The current continuation * level reverts to the level of the one we're seeing. * * Continuations at the current level are processed as, if we see * one, there's no lower-level continuation that may have failed. * * If a continuation matches, we bump the current continuation level * so that higher-level continuations are processed. */ private int match(struct magic_set *ms, struct magic *magic, uint32_t nmagic, const unsigned char *s, size_t nbytes, size_t offset, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t magindex = 0; unsigned int cont_level = 0; int returnvalv = 0, e; /* if a match is found it is set to 1*/ int firstline = 1; /* a flag to print X\n X\n- X */ int print = (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0; if (returnval == NULL) returnval = &returnvalv; if (file_check_mem(ms, cont_level) == -1) return -1; for (magindex = 0; magindex < nmagic; magindex++) { int flush = 0; struct magic *m = &magic[magindex]; if (m->type != FILE_NAME) if ((IS_STRING(m->type) && #define FLT (STRING_BINTEST | STRING_TEXTTEST) ((text && (m->str_flags & FLT) == STRING_BINTEST) || (!text && (m->str_flags & FLT) == STRING_TEXTTEST))) || (m->flag & mode) != mode) { /* Skip sub-tests */ while (magindex + 1 < nmagic && magic[magindex + 1].cont_level != 0 && ++magindex) continue; continue; /* Skip to next top-level test*/ } ms->offset = m->offset; ms->line = m->lineno; /* if main entry matches, print it... 
*/ switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: flush = m->reln != '!'; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; switch (magiccheck(ms, m)) { case -1: return -1; case 0: flush++; break; default: flush = 0; break; } break; } if (flush) { /* * main entry didn't match, * flush its continuations */ while (magindex < nmagic - 1 && magic[magindex + 1].cont_level != 0) magindex++; continue; } if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, we'll need to print * a blank before we print something else. */ if (*m->desc) { *need_separator = 1; *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); /* and any continuations that match */ if (file_check_mem(ms, ++cont_level) == -1) return -1; while (++magindex < nmagic && magic[magindex].cont_level != 0) { m = &magic[magindex]; ms->line = m->lineno; /* for messages */ if (cont_level < m->cont_level) continue; if (cont_level > m->cont_level) { /* * We're at the end of the level * "cont_level" continuations. */ cont_level = m->cont_level; } ms->offset = m->offset; if (m->flag & OFFADD) { ms->offset += ms->c.li[cont_level - 1].off; } #ifdef ENABLE_CONDITIONALS if (m->cond == COND_ELSE || m->cond == COND_ELIF) { if (ms->c.li[cont_level].last_match == 1) continue; } #endif switch (mget(ms, s, m, nbytes, offset, cont_level, mode, text, flip, recursion_level + 1, printed_something, need_separator, returnval)) { case -1: return -1; case 0: if (m->reln != '!') continue; flush = 1; break; default: if (m->type == FILE_INDIRECT) *returnval = 1; flush = 0; break; } switch (flush ? 1 : magiccheck(ms, m)) { case -1: return -1; case 0: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 0; #endif break; default: #ifdef ENABLE_CONDITIONALS ms->c.li[cont_level].last_match = 1; #endif if (m->type == FILE_CLEAR) ms->c.li[cont_level].got_match = 0; else if (ms->c.li[cont_level].got_match) { if (m->type == FILE_DEFAULT) break; } else ms->c.li[cont_level].got_match = 1; if ((e = handle_annotation(ms, m)) != 0) { *need_separator = 1; *printed_something = 1; *returnval = 1; return e; } /* * If we are going to print something, * make sure that we have a separator first. */ if (*m->desc) { if (!*printed_something) { *printed_something = 1; if (print_sep(ms, firstline) == -1) return -1; } } /* * This continuation matched. Print * its message, with a blank before it * if the previous item printed and * this item isn't empty. */ /* space if previous printed */ if (*need_separator && ((m->flag & NOSPACE) == 0) && *m->desc) { if (print && file_printf(ms, " ") == -1) return -1; *need_separator = 0; } if (print && mprint(ms, m) == -1) return -1; ms->c.li[cont_level].off = moffset(ms, m); if (*m->desc) *need_separator = 1; /* * If we see any continuations * at a higher level, * process them. 
*/ if (file_check_mem(ms, ++cont_level) == -1) return -1; break; } } if (*printed_something) { firstline = 0; if (print) *returnval = 1; } if ((ms->flags & MAGIC_CONTINUE) == 0 && *printed_something) { return *returnval; /* don't keep searching */ } } return *returnval; /* This is hit if -k is set or there is no match */ } private int check_fmt(struct magic_set *ms, struct magic *m) { file_regex_t rx; int rc, rv = -1; if (strchr(m->desc, '%') == NULL) return 0; rc = file_regcomp(&rx, "%[-0-9\\.]*s", REG_EXTENDED|REG_NOSUB); if (rc) { file_regerror(&rx, rc, ms); } else { rc = file_regexec(&rx, m->desc, 0, 0, 0); rv = !rc; } file_regfree(&rx); return rv; } #ifndef HAVE_STRNDUP char * strndup(const char *, size_t); char * strndup(const char *str, size_t n) { size_t len; char *copy; for (len = 0; len < n && str[len]; len++) continue; if ((copy = malloc(len + 1)) == NULL) return NULL; (void)memcpy(copy, str, len); copy[len] = '\0'; return copy; } #endif /* HAVE_STRNDUP */ private int32_t mprint(struct magic_set *ms, struct magic *m) { uint64_t v; float vf; double vd; int64_t t = 0; char buf[128], tbuf[26]; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = file_signextend(ms, m, (uint64_t)p->b); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%d", (unsigned char)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%d"), (unsigned char) v) == -1) return -1; break; } t = ms->offset + sizeof(char); break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = file_signextend(ms, m, (uint64_t)p->h); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (unsigned short)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (unsigned short) v) == -1) return -1; break; } t = ms->offset + sizeof(short); break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: v = file_signextend(ms, m, (uint64_t)p->l); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (uint32_t) v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%u"), (uint32_t) v) == -1) return -1; break; } t = ms->offset + sizeof(int32_t); break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: v = file_signextend(ms, m, p->q); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%" INT64_T_FORMAT "u", (unsigned long long)v); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%" INT64_T_FORMAT "u"), (unsigned long long) v) == -1) return -1; break; } t = ms->offset + sizeof(int64_t); break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') { if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; t = ms->offset + m->vallen; } else { char *str = p->s; /* compute t before we mangle the string? 
*/ t = ms->offset + strlen(str); if (*m->value.s == '\0') str[strcspn(str, "\n")] = '\0'; if (m->str_flags & STRING_TRIM) { char *last; while (isspace((unsigned char)*str)) str++; last = str; while (*last) last++; --last; while (isspace((unsigned char)*last)) last--; *++last = '\0'; } if (file_printf(ms, F(ms, m, "%s"), str) == -1) return -1; if (m->type == FILE_PSTRING) t += file_pstring_length_size(m); } break; case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->l, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: if (file_printf(ms, F(ms, m, "%s"), file_fmttime(p->q, FILE_T_WINDOWS, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: vf = p->f; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vf); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vf) == -1) return -1; break; } t = ms->offset + sizeof(float); break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: vd = p->d; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vd); if (file_printf(ms, F(ms, m, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(ms, m, "%g"), vd) == -1) return -1; break; } t = ms->offset + sizeof(double); break; case FILE_REGEX: { char *cp; int rval; cp = strndup((const char *)ms->search.s, ms->search.rm_len); if (cp == NULL) { file_oomem(ms, ms->search.rm_len); return -1; } rval = file_printf(ms, F(ms, m, "%s"), cp); free(cp); if (rval == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + ms->search.rm_len; break; } case FILE_SEARCH: if (file_printf(ms, F(ms, m, "%s"), m->value.s) == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + m->vallen; break; case FILE_DEFAULT: case FILE_CLEAR: if (file_printf(ms, "%s", m->desc) == -1) return -1; t = ms->offset; break; case FILE_INDIRECT: case FILE_USE: case FILE_NAME: t = ms->offset; break; default: file_magerror(ms, "invalid m->type (%d) in mprint()", m->type); return -1; } return (int32_t)t; } private int32_t moffset(struct magic_set *ms, struct magic *m) { switch (m->type) { case FILE_BYTE: return CAST(int32_t, (ms->offset + sizeof(char))); case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: return CAST(int32_t, (ms->offset + sizeof(short))); case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: return CAST(int32_t, (ms->offset + sizeof(int32_t))); case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: return CAST(int32_t, (ms->offset + sizeof(int64_t))); case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') 
return ms->offset + m->vallen; else { union VALUETYPE *p = &ms->ms_value; uint32_t t; if (*m->value.s == '\0') p->s[strcspn(p->s, "\n")] = '\0'; t = CAST(uint32_t, (ms->offset + strlen(p->s))); if (m->type == FILE_PSTRING) t += (uint32_t)file_pstring_length_size(m); return t; } case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: return CAST(int32_t, (ms->offset + sizeof(uint32_t))); case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: return CAST(int32_t, (ms->offset + sizeof(uint64_t))); case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: return CAST(int32_t, (ms->offset + sizeof(float))); case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: return CAST(int32_t, (ms->offset + sizeof(double))); case FILE_REGEX: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + ms->search.rm_len)); case FILE_SEARCH: if ((m->str_flags & REGEX_OFFSET_START) != 0) return CAST(int32_t, ms->search.offset); else return CAST(int32_t, (ms->search.offset + m->vallen)); case FILE_CLEAR: case FILE_DEFAULT: case FILE_INDIRECT: return ms->offset; default: return 0; } } private int cvt_flip(int type, int flip) { if (flip == 0) return type; switch (type) { case FILE_BESHORT: return FILE_LESHORT; case FILE_BELONG: return FILE_LELONG; case FILE_BEDATE: return FILE_LEDATE; case FILE_BELDATE: return FILE_LELDATE; case FILE_BEQUAD: return FILE_LEQUAD; case FILE_BEQDATE: return FILE_LEQDATE; case FILE_BEQLDATE: return FILE_LEQLDATE; case FILE_BEQWDATE: return FILE_LEQWDATE; case FILE_LESHORT: return FILE_BESHORT; case FILE_LELONG: return FILE_BELONG; case FILE_LEDATE: return FILE_BEDATE; case FILE_LELDATE: return FILE_BELDATE; case FILE_LEQUAD: return FILE_BEQUAD; case FILE_LEQDATE: return FILE_BEQDATE; case FILE_LEQLDATE: return FILE_BEQLDATE; case FILE_LEQWDATE: return FILE_BEQWDATE; case FILE_BEFLOAT: return FILE_LEFLOAT; case FILE_LEFLOAT: return FILE_BEFLOAT; case FILE_BEDOUBLE: return FILE_LEDOUBLE; case FILE_LEDOUBLE: return FILE_BEDOUBLE; default: return type; } } #define DO_CVT(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPAND: \ p->fld &= cast m->num_mask; \ break; \ case FILE_OPOR: \ p->fld |= cast m->num_mask; \ break; \ case FILE_OPXOR: \ p->fld ^= cast m->num_mask; \ break; \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ case FILE_OPMODULO: \ p->fld %= cast m->num_mask; \ break; \ } \ if (m->mask_op & FILE_OPINVERSE) \ p->fld = ~p->fld \ private void cvt_8(union VALUETYPE *p, const struct magic *m) { DO_CVT(b, (uint8_t)); } private void cvt_16(union VALUETYPE *p, const struct magic *m) { DO_CVT(h, (uint16_t)); } private void cvt_32(union VALUETYPE *p, const struct magic *m) { DO_CVT(l, (uint32_t)); } private void cvt_64(union VALUETYPE *p, const struct magic *m) { DO_CVT(q, (uint64_t)); } #define DO_CVT2(fld, cast) \ if (m->num_mask) \ switch (m->mask_op & FILE_OPS_MASK) { \ case FILE_OPADD: \ p->fld += cast m->num_mask; \ break; \ case FILE_OPMINUS: \ p->fld -= cast m->num_mask; \ break; \ case FILE_OPMULTIPLY: \ p->fld *= cast 
m->num_mask; \ break; \ case FILE_OPDIVIDE: \ p->fld /= cast m->num_mask; \ break; \ } \ private void cvt_float(union VALUETYPE *p, const struct magic *m) { DO_CVT2(f, (float)); } private void cvt_double(union VALUETYPE *p, const struct magic *m) { DO_CVT2(d, (double)); } /* * Convert the byte order of the data we are looking at * While we're here, let's apply the mask operation * (unless you have a better idea) */ private int mconvert(struct magic_set *ms, struct magic *m, int flip) { union VALUETYPE *p = &ms->ms_value; switch (cvt_flip(m->type, flip)) { case FILE_BYTE: cvt_8(p, m); return 1; case FILE_SHORT: cvt_16(p, m); return 1; case FILE_LONG: case FILE_DATE: case FILE_LDATE: cvt_32(p, m); return 1; case FILE_QUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: cvt_64(p, m); return 1; case FILE_STRING: case FILE_BESTRING16: case FILE_LESTRING16: { /* Null terminate and eat *trailing* return */ p->s[sizeof(p->s) - 1] = '\0'; return 1; } case FILE_PSTRING: { char *ptr1 = p->s, *ptr2 = ptr1 + file_pstring_length_size(m); size_t len = file_pstring_get_length(m, ptr1); if (len >= sizeof(p->s)) len = sizeof(p->s) - 1; while (len--) *ptr1++ = *ptr2++; *ptr1 = '\0'; return 1; } case FILE_BESHORT: p->h = (short)((p->hs[0]<<8)|(p->hs[1])); cvt_16(p, m); return 1; case FILE_BELONG: case FILE_BEDATE: case FILE_BELDATE: p->l = (int32_t) ((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3])); cvt_32(p, m); return 1; case FILE_BEQUAD: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7])); cvt_64(p, m); return 1; case FILE_LESHORT: p->h = (short)((p->hs[1]<<8)|(p->hs[0])); cvt_16(p, m); return 1; case FILE_LELONG: case FILE_LEDATE: case FILE_LELDATE: p->l = (int32_t) ((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0])); cvt_32(p, m); return 1; case FILE_LEQUAD: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0])); cvt_64(p, m); return 1; case FILE_MELONG: case FILE_MEDATE: case FILE_MELDATE: p->l = (int32_t) ((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2])); cvt_32(p, m); return 1; case FILE_FLOAT: cvt_float(p, m); return 1; case FILE_BEFLOAT: p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)| ((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]); cvt_float(p, m); return 1; case FILE_LEFLOAT: p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)| ((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]); cvt_float(p, m); return 1; case FILE_DOUBLE: cvt_double(p, m); return 1; case FILE_BEDOUBLE: p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]); cvt_double(p, m); return 1; case FILE_LEDOUBLE: p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]); cvt_double(p, m); return 1; case FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_CLEAR: case FILE_NAME: case FILE_USE: return 1; default: file_magerror(ms, "invalid type %d in mconvert()", 
m->type); return 0; } } private void mdebug(uint32_t offset, const char *str, size_t len) { (void) fprintf(stderr, "mget/%zu @%d: ", len, offset); file_showstr(stderr, str, len); (void) fputc('\n', stderr); (void) fputc('\n', stderr); } private int mcopy(struct magic_set *ms, union VALUETYPE *p, int type, int indir, const unsigned char *s, uint32_t offset, size_t nbytes, size_t linecnt) { /* * Note: FILE_SEARCH and FILE_REGEX do not actually copy * anything, but setup pointers into the source */ if (indir == 0) { switch (type) { case FILE_SEARCH: ms->search.s = RCAST(const char *, s) + offset; ms->search.s_len = nbytes - offset; ms->search.offset = offset; return 0; case FILE_REGEX: { const char *b; const char *c; const char *last; /* end of search region */ const char *buf; /* start of search region */ const char *end; size_t lines; if (s == NULL) { ms->search.s_len = 0; ms->search.s = NULL; return 0; } buf = RCAST(const char *, s) + offset; end = last = RCAST(const char *, s) + nbytes; /* mget() guarantees buf <= last */ for (lines = linecnt, b = buf; lines && b < end && ((b = CAST(const char *, memchr(c = b, '\n', CAST(size_t, (end - b))))) || (b = CAST(const char *, memchr(c, '\r', CAST(size_t, (end - c)))))); lines--, b++) { last = b; if (b[0] == '\r' && b[1] == '\n') b++; } if (lines) last = RCAST(const char *, s) + nbytes; ms->search.s = buf; ms->search.s_len = last - buf; ms->search.offset = offset; ms->search.rm_len = 0; return 0; } case FILE_BESTRING16: case FILE_LESTRING16: { const unsigned char *src = s + offset; const unsigned char *esrc = s + nbytes; char *dst = p->s; char *edst = &p->s[sizeof(p->s) - 1]; if (type == FILE_BESTRING16) src++; /* check that offset is within range */ if (offset >= nbytes) break; for (/*EMPTY*/; src < esrc; src += 2, dst++) { if (dst < edst) *dst = *src; else break; if (*dst == '\0') { if (type == FILE_BESTRING16 ? *(src - 1) != '\0' : *(src + 1) != '\0') *dst = ' '; } } *edst = '\0'; return 0; } case FILE_STRING: /* XXX - these two should not need */ case FILE_PSTRING: /* to copy anything, but do anyway. 
*/ default: break; } } if (offset >= nbytes) { (void)memset(p, '\0', sizeof(*p)); return 0; } if (nbytes - offset < sizeof(*p)) nbytes = nbytes - offset; else nbytes = sizeof(*p); (void)memcpy(p, s + offset, nbytes); /* * the usefulness of padding with zeroes eludes me, it * might even cause problems */ if (nbytes < sizeof(*p)) (void)memset(((char *)(void *)p) + nbytes, '\0', sizeof(*p) - nbytes); return 0; } private int mget(struct magic_set *ms, const unsigned char *s, struct magic *m, size_t nbytes, size_t o, unsigned int cont_level, int mode, int text, int flip, int recursion_level, int *printed_something, int *need_separator, int *returnval) { uint32_t soffset, offset = ms->offset; uint32_t count = m->str_range; uint32_t lhs; int rv, oneed_separator, in_type; char *sbuf, *rbuf; union VALUETYPE *p = &ms->ms_value; struct mlist ml; if (recursion_level >= 20) { file_error(ms, 0, "recursion nesting exceeded"); return -1; } if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o), (uint32_t)nbytes, count) == -1) return -1; if ((ms->flags & MAGIC_DEBUG) != 0) { fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%zu, " "nbytes=%zu, count=%u)\n", m->type, m->flag, offset, o, nbytes, count); mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } if (m->flag & INDIR) { int off = m->in_offset; if (m->in_op & FILE_OPINDIRECT) { const union VALUETYPE *q = CAST(const union VALUETYPE *, ((const void *)(s + offset + off))); switch (cvt_flip(m->in_type, flip)) { case FILE_BYTE: off = q->b; break; case FILE_SHORT: off = q->h; break; case FILE_BESHORT: off = (short)((q->hs[0]<<8)|(q->hs[1])); break; case FILE_LESHORT: off = (short)((q->hs[1]<<8)|(q->hs[0])); break; case FILE_LONG: off = q->l; break; case FILE_BELONG: case FILE_BEID3: off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)| (q->hl[2]<<8)|(q->hl[3])); break; case FILE_LEID3: case FILE_LELONG: off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)| (q->hl[1]<<8)|(q->hl[0])); break; case FILE_MELONG: off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)| (q->hl[3]<<8)|(q->hl[2])); break; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect offs=%u\n", off); } switch (in_type = cvt_flip(m->in_type, flip)) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->b & off; break; case FILE_OPOR: offset = p->b | off; break; case FILE_OPXOR: offset = p->b ^ off; break; case FILE_OPADD: offset = p->b + off; break; case FILE_OPMINUS: offset = p->b - off; break; case FILE_OPMULTIPLY: offset = p->b * off; break; case FILE_OPDIVIDE: offset = p->b / off; break; case FILE_OPMODULO: offset = p->b % off; break; } } else offset = p->b; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[0] << 8) | p->hs[1]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; lhs = (p->hs[1] << 8) | p->hs[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: 
offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_SHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->h & off; break; case FILE_OPOR: offset = p->h | off; break; case FILE_OPXOR: offset = p->h ^ off; break; case FILE_OPADD: offset = p->h + off; break; case FILE_OPMINUS: offset = p->h - off; break; case FILE_OPMULTIPLY: offset = p->h * off; break; case FILE_OPDIVIDE: offset = p->h / off; break; case FILE_OPMODULO: offset = p->h % off; break; } } else offset = p->h; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_BELONG: case FILE_BEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[0] << 24) | (p->hl[1] << 16) | (p->hl[2] << 8) | p->hl[3]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LELONG: case FILE_LEID3: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[3] << 24) | (p->hl[2] << 16) | (p->hl[1] << 8) | p->hl[0]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_MELONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; lhs = (p->hl[1] << 24) | (p->hl[0] << 16) | (p->hl[3] << 8) | p->hl[2]; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = lhs & off; break; case FILE_OPOR: offset = lhs | off; break; case FILE_OPXOR: offset = lhs ^ off; break; case FILE_OPADD: offset = lhs + off; break; case FILE_OPMINUS: offset = lhs - off; break; case FILE_OPMULTIPLY: offset = lhs * off; break; case FILE_OPDIVIDE: offset = lhs / off; break; case FILE_OPMODULO: offset = lhs % off; break; } } else offset = lhs; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; case FILE_LONG: if (OFFSET_OOB(nbytes, offset, 4)) return 0; if (off) { switch (m->in_op & FILE_OPS_MASK) { case FILE_OPAND: offset = p->l & off; break; case FILE_OPOR: offset = p->l | off; break; case FILE_OPXOR: offset = p->l ^ off; break; case FILE_OPADD: offset = p->l + off; break; case FILE_OPMINUS: offset = p->l - off; break; case FILE_OPMULTIPLY: offset = p->l * off; break; case FILE_OPDIVIDE: offset = p->l / off; break; case FILE_OPMODULO: offset = p->l % off; break; } } else offset = p->l; if (m->in_op & FILE_OPINVERSE) offset = ~offset; break; default: break; } switch (in_type) { case FILE_LEID3: case FILE_BEID3: offset = ((((offset >> 0) & 
0x7f) << 0) | (((offset >> 8) & 0x7f) << 7) | (((offset >> 16) & 0x7f) << 14) | (((offset >> 24) & 0x7f) << 21)) + 10; break; default: break; } if (m->flag & INDIROFFADD) { offset += ms->c.li[cont_level-1].off; if (offset == 0) { if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect *zero* offset\n"); return 0; } if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect +offs=%u\n", offset); } if (mcopy(ms, p, m->type, 0, s, offset, nbytes, count) == -1) return -1; ms->offset = offset; if ((ms->flags & MAGIC_DEBUG) != 0) { mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE)); #ifndef COMPILE_ONLY file_mdump(m); #endif } } /* Verify we have enough data to match magic type */ switch (m->type) { case FILE_BYTE: if (OFFSET_OOB(nbytes, offset, 1)) return 0; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: if (OFFSET_OOB(nbytes, offset, 2)) return 0; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: if (OFFSET_OOB(nbytes, offset, 4)) return 0; break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: if (OFFSET_OOB(nbytes, offset, 8)) return 0; break; case FILE_STRING: case FILE_PSTRING: case FILE_SEARCH: if (OFFSET_OOB(nbytes, offset, m->vallen)) return 0; break; case FILE_REGEX: if (nbytes < offset) return 0; break; case FILE_INDIRECT: if (offset == 0) return 0; if (nbytes < offset) return 0; sbuf = ms->o.buf; soffset = ms->offset; ms->o.buf = NULL; ms->offset = 0; rv = file_softmagic(ms, s + offset, nbytes - offset, recursion_level, BINTEST, text); if ((ms->flags & MAGIC_DEBUG) != 0) fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv); rbuf = ms->o.buf; ms->o.buf = sbuf; ms->offset = soffset; if (rv == 1) { if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 && file_printf(ms, F(ms, m, "%u"), offset) == -1) { free(rbuf); return -1; } if (file_printf(ms, "%s", rbuf) == -1) { free(rbuf); return -1; } } free(rbuf); return rv; case FILE_USE: if (nbytes < offset) return 0; sbuf = m->value.s; if (*sbuf == '^') { sbuf++; flip = !flip; } if (file_magicfind(ms, sbuf, &ml) == -1) { file_error(ms, 0, "cannot find entry `%s'", sbuf); return -1; } oneed_separator = *need_separator; if (m->flag & NOSPACE) *need_separator = 0; rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o, mode, text, flip, recursion_level, printed_something, need_separator, returnval); if (rv != 1) *need_separator = oneed_separator; return rv; case FILE_NAME: if (file_printf(ms, "%s", m->desc) == -1) return -1; return 1; case FILE_DEFAULT: /* nothing to check */ case FILE_CLEAR: default: break; } if (!mconvert(ms, m, flip)) return 0; return 1; } private uint64_t file_strncmp(const char *s1, const char *s2, size_t len, uint32_t flags) { /* * Convert the source args to unsigned here so that (1) the * compare will be unsigned as it is in strncmp() and (2) so * the ctype functions will work correctly without extra * casting. */ const unsigned char *a = (const unsigned char *)s1; const unsigned char *b = (const unsigned char *)s2; uint64_t v; /* * What we want here is v = strncmp(s1, s2, len), * but ignoring any nulls. 
*/ v = 0; if (0L == flags) { /* normal string: do it fast */ while (len-- > 0) if ((v = *b++ - *a++) != '\0') break; } else { /* combine the others */ while (len-- > 0) { if ((flags & STRING_IGNORE_LOWERCASE) && islower(*a)) { if ((v = tolower(*b++) - *a++) != '\0') break; } else if ((flags & STRING_IGNORE_UPPERCASE) && isupper(*a)) { if ((v = toupper(*b++) - *a++) != '\0') break; } else if ((flags & STRING_COMPACT_WHITESPACE) && isspace(*a)) { a++; if (isspace(*b++)) { if (!isspace(*a)) while (isspace(*b)) b++; } else { v = 1; break; } } else if ((flags & STRING_COMPACT_OPTIONAL_WHITESPACE) && isspace(*a)) { a++; while (isspace(*b)) b++; } else { if ((v = *b++ - *a++) != '\0') break; } } } return v; } private uint64_t file_strncmp16(const char *a, const char *b, size_t len, uint32_t flags) { /* * XXX - The 16-bit string compare probably needs to be done * differently, especially if the flags are to be supported. * At the moment, I am unsure. */ flags = 0; return file_strncmp(a, b, len, flags); } private int magiccheck(struct magic_set *ms, struct magic *m) { uint64_t l = m->value.q; uint64_t v; float fl, fv; double dl, dv; int matched; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = p->b; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = p->h; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: v = p->l; break; case FILE_QUAD: case FILE_LEQUAD: case FILE_BEQUAD: case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: v = p->q; break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: fl = m->value.f; fv = p->f; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = fv != fl; break; case '=': matched = fv == fl; break; case '>': matched = fv > fl; break; case '<': matched = fv < fl; break; default: file_magerror(ms, "cannot happen with float: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: dl = m->value.d; dv = p->d; switch (m->reln) { case 'x': matched = 1; break; case '!': matched = dv != dl; break; case '=': matched = dv == dl; break; case '>': matched = dv > dl; break; case '<': matched = dv < dl; break; default: file_magerror(ms, "cannot happen with double: invalid relation `%c'", m->reln); return -1; } return matched; case FILE_DEFAULT: case FILE_CLEAR: l = 0; v = 0; break; case FILE_STRING: case FILE_PSTRING: l = 0; v = file_strncmp(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_BESTRING16: case FILE_LESTRING16: l = 0; v = file_strncmp16(m->value.s, p->s, (size_t)m->vallen, m->str_flags); break; case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */ size_t slen; size_t idx; if (ms->search.s == NULL) return 0; slen = MIN(m->vallen, sizeof(m->value.s)); l = 0; v = 0; for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) { if (slen + idx > ms->search.s_len) break; v = file_strncmp(m->value.s, ms->search.s + idx, slen, m->str_flags); if (v == 0) { /* found match */ ms->search.offset += idx; break; } } break; } case FILE_REGEX: { int rc; file_regex_t rx; if (ms->search.s == NULL) return 0; l = 0; rc = file_regcomp(&rx, m->value.s, REG_EXTENDED|REG_NEWLINE| ((m->str_flags & STRING_IGNORE_CASE) ? 
REG_ICASE : 0)); if (rc) { file_regerror(&rx, rc, ms); v = (uint64_t)-1; } else { #ifndef REG_STARTEND char c; #endif regmatch_t pmatch[1]; size_t slen = ms->search.s_len; /* Limit by offset if requested */ if (m->str_range > 0) slen = MIN(slen, m->str_range); #ifndef REG_STARTEND #define REG_STARTEND 0 if (slen != 0) slen--; c = ms->search.s[slen]; ((char *)(intptr_t)ms->search.s)[slen] = '\0'; #else pmatch[0].rm_so = 0; pmatch[0].rm_eo = slen; #endif rc = file_regexec(&rx, (const char *)ms->search.s, 1, pmatch, REG_STARTEND); #if REG_STARTEND == 0 ((char *)(intptr_t)ms->search.s)[l] = c; #endif switch (rc) { case 0: ms->search.s += (int)pmatch[0].rm_so; ms->search.offset += (size_t)pmatch[0].rm_so; ms->search.rm_len = (size_t)(pmatch[0].rm_eo - pmatch[0].rm_so); v = 0; break; case REG_NOMATCH: v = 1; break; default: file_regerror(&rx, rc, ms); v = (uint64_t)-1; break; } } file_regfree(&rx); if (v == (uint64_t)-1) return -1; break; } case FILE_INDIRECT: case FILE_USE: case FILE_NAME: return 1; default: file_magerror(ms, "invalid type %d in magiccheck()", m->type); return -1; } v = file_signextend(ms, m, v); switch (m->reln) { case 'x': if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == *any* = 1\n", (unsigned long long)v); matched = 1; break; case '!': matched = v != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u != %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '=': matched = v == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u == %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); break; case '>': if (m->flag & UNSIGNED) { matched = v > l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u > %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v > (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d > %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '<': if (m->flag & UNSIGNED) { matched = v < l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "u < %" INT64_T_FORMAT "u = %d\n", (unsigned long long)v, (unsigned long long)l, matched); } else { matched = (int64_t) v < (int64_t) l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "%" INT64_T_FORMAT "d < %" INT64_T_FORMAT "d = %d\n", (long long)v, (long long)l, matched); } break; case '&': matched = (v & l) == l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) == %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; case '^': matched = (v & l) != l; if ((ms->flags & MAGIC_DEBUG) != 0) (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %" INT64_T_FORMAT "x) != %" INT64_T_FORMAT "x) = %d\n", (unsigned long long)v, (unsigned long long)l, (unsigned long long)l, matched); break; default: file_magerror(ms, "cannot happen: invalid relation `%c'", m->reln); return -1; } return matched; } private int handle_annotation(struct magic_set *ms, struct magic *m) { if (ms->flags & MAGIC_APPLE) { if (file_printf(ms, "%.8s", m->apple) == -1) return -1; return 1; } if ((ms->flags & MAGIC_MIME_TYPE) && m->mimetype[0]) { if (file_printf(ms, "%s", m->mimetype) == -1) return -1; return 1; } return 0; } private int print_sep(struct magic_set *ms, int 
firstline) { if (ms->flags & MAGIC_MIME) return 0; if (firstline) return 0; /* * we found another match * put a newline and '-' to do some simple formatting */ return file_printf(ms, "\n- "); }
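/*
 * A small stand-alone restatement, not part of file(1), of the bounds check
 * softmagic.c applies to offset-relative reads in mget() above:
 * OFFSET_OOB(n, o, i) is written as ((n) < (o) || (i) > ((n) - (o)))
 * rather than the naive (o) + (i) > (n), because with unsigned arithmetic
 * the naive sum can wrap around and make an out-of-range read look
 * in-range.  The helper name below is a hypothetical choice made only for
 * this sketch.
 */
#include <stddef.h>

/* Nonzero if reading 'want' bytes at 'off' would run past 'avail' bytes. */
static int
read_would_overrun(size_t avail, size_t off, size_t want)
{
	/* Subtract only after establishing avail >= off, so nothing wraps. */
	return avail < off || want > avail - off;
}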
./CrossVul/dataset_final_sorted/CWE-399/c/good_2147_0
crossvul-cpp_data_bad_3622_1
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { 
"host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { struct kvm_shared_msrs *smsr; u64 value; smsr = &__get_cpu_var(shared_msrs); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void *ignore) { struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return vcpu->arch.apic_base; else return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) { /* TODO: reserve bits check */ if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_base(vcpu, data); else vcpu->arch.apic_base = data; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return 
EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else vcpu->arch.mmu.inject_page_fault(vcpu, fault); } void kvm_inject_nmi(struct kvm_vcpu *vcpu) { atomic_inc(&vcpu->arch.nmi_queued); kvm_make_request(KVM_REQ_NMI, vcpu); } EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { kvm_multiple_exception(vcpu, nr, true, error_code, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); /* * Checks if cpl <= required_cpl; if true, return true. Otherwise queue * a #GP and return false. */ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) { if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) return true; kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return false; } EXPORT_SYMBOL_GPL(kvm_require_cpl); /* * This function will be used to read from the physical memory of the currently * running guest. The difference to kvm_read_guest_page is that this function * can read from guest physical or from the guest's guest physical memory. 
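 *
 * Concretely (as the body below shows), the nested gfn is converted to a
 * gpa and pushed through mmu->translate_gpa() with the requested access
 * rights; only the resulting L1 gfn is handed to kvm_read_guest_page().
 * A failed translation (UNMAPPED_GVA) is reported to the caller as -EFAULT.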
*/ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); if (r < 0) goto out; changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; out: return changed; } int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW; cr0 |= X86_CR0_ET; #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) return 1; #endif cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) return 1; if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) return 1; if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME)) { int cs_db, cs_l; if (!is_pae(vcpu)) return 1; kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) return 1; } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; } kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); } if ((cr0 ^ old_cr0) & update_bits) kvm_mmu_reset_context(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; if (kvm_x86_ops->get_cpl(vcpu) != 0) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 
& XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; if (xcr0 & ~host_xcr0) return 1; vcpu->arch.xcr0 = xcr0; vcpu->guest_xcr0_loaded = 0; return 0; } int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_set_xcr); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) return 1; if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS)) return 1; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr4); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { kvm_mmu_sync_roots(vcpu); kvm_mmu_flush_tlb(vcpu); return 0; } if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) return 1; } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) return 1; if (is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; } /* * We don't check reserved bits in nonpae mode, because * this isn't enforced, and VMware depends on this. */ } /* * Does the new cr3 value map to physical memory? (Note, we * catch an invalid cr3 even in real-mode, because it would * cause trouble later on when we turn on paging anyway.) * * A real CPU would silently accept an invalid cr3 and would * attempt to use it - with largely undefined (and often hard * to debug) behavior on the guest side. */ if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) return 1; vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); vcpu->arch.mmu.new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) return 1; if (irqchip_in_kernel(vcpu->kvm)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr8); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) { if (irqchip_in_kernel(vcpu->kvm)) return kvm_lapic_get_cr8(vcpu); else return vcpu->arch.cr8; } EXPORT_SYMBOL_GPL(kvm_get_cr8); static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { switch (dr) { case 0 ... 
3: vcpu->arch.db[dr] = val; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) vcpu->arch.eff_db[dr] = val; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ case 6: if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* #UD */ /* fall through */ default: /* 7 */ if (val & 0xffffffff00000000ULL) return -1; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7); vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK); } break; } return 0; } int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { int res; res = __kvm_set_dr(vcpu, dr, val); if (res > 0) kvm_queue_exception(vcpu, UD_VECTOR); else if (res < 0) kvm_inject_gp(vcpu, 0); return res; } EXPORT_SYMBOL_GPL(kvm_set_dr); static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; } int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { if (_kvm_get_dr(vcpu, dr, val)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } return 0; } EXPORT_SYMBOL_GPL(kvm_get_dr); bool kvm_rdpmc(struct kvm_vcpu *vcpu) { u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); u64 data; int err; err = kvm_pmu_read_pmc(vcpu, ecx, &data); if (err) return err; kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); return err; } EXPORT_SYMBOL_GPL(kvm_rdpmc); /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. * * This list is modified at module load time to reflect the * capabilities of the host cpu. This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. 
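 *
 * KVM_SAVE_MSRS_BEGIN below is the number of those kvm-specific
 * (paravirtual and Hyper-V) MSRs sitting at the head of msrs_to_save,
 * so that the host-capability filtering only needs to look at the
 * entries that follow them.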
*/ #define KVM_SAVE_MSRS_BEGIN 9 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; static u32 emulated_msrs[] = { MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (efer & efer_reserved_bits) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return 1; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return 1; } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { return kvm_x86_ops->set_msr(vcpu, msr_index, data); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { return kvm_set_msr(vcpu, index, *data); } static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. 
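 *
 * The version field follows the usual pvclock sequence protocol: it is
 * bumped to an odd value before the wall clock payload is written and to
 * an even value afterwards, so a guest that observes an odd or changing
 * version knows the contents are being updated and can retry its read.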
*/ getboottime(&boot); wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { struct timespec ts; WARN_ON(preemptible()); ktime_get_ts(&ts); monotonic_to_bootbased(&ts); return timespec_to_ns(&ts); } static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline int kvm_tsc_changes_freq(void) { int cpu = get_cpu(); int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && cpufreq_quick_get(cpu) != 0; put_cpu(); return ret; } u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu) { if (vcpu->arch.virtual_tsc_khz) return vcpu->arch.virtual_tsc_khz; else return __this_cpu_read(cpu_tsc_khz); } static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { u64 ret; WARN_ON(preemptible()); if (kvm_tsc_changes_freq()) printk_once(KERN_WARNING "kvm: unreliable cycle conversion on adjustable rate TSC\n"); ret = nsec * vcpu_tsc_khz(vcpu); do_div(ret, USEC_PER_SEC); return ret; } static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.tsc_catchup_shift, &vcpu->arch.tsc_catchup_mult); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec, vcpu->arch.tsc_catchup_mult, vcpu->arch.tsc_catchup_shift); tsc += vcpu->arch.last_tsc_write; return tsc; } void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 sdiff; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; sdiff = data - kvm->arch.last_tsc_write; if (sdiff < 0) sdiff = -sdiff; /* * Special case: close write to TSC within 5 seconds of * another CPU is interpreted as an attempt to synchronize * The 5 seconds is to accommodate host load / swapping as * well as any reset of TSC during the boot process. * * In that case, for a reliable TSC, we can match TSC offsets, * or make a best guest using elapsed value. 
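 *
 * As an illustration (assuming a 2 GHz guest TSC): nsec_to_cycles()
 * turns the 5 second window into 10^10 cycles, so a TSC write whose
 * value is within 10^10 ticks of the previous one, issued less than
 * five seconds of host time after it, takes the synchronization path
 * below instead of being treated as a deliberate jump.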
*/ if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) && elapsed < 5ULL * NSEC_PER_SEC) { if (!check_tsc_unstable()) { offset = kvm->arch.last_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); offset += delta; pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } ns = kvm->arch.last_tsc_nsec; } kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_offset = offset; kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_tsc_write = data; vcpu->arch.last_tsc_nsec = ns; } EXPORT_SYMBOL_GPL(kvm_write_tsc); static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags; struct kvm_vcpu_arch *vcpu = &v->arch; void *shared_kaddr; unsigned long this_tsc_khz; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp; /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); tsc_timestamp = kvm_x86_ops->read_l1_tsc(v); kernel_ns = get_kernel_ns(); this_tsc_khz = vcpu_tsc_khz(v); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->time_page) return 0; /* * Time as measured by the TSC may go backwards when resetting the base * tsc_timestamp. The reason for this is that the TSC resolution is * higher than the resolution of the other clock scales. Thus, many * possible measurments of the TSC correspond to one measurement of any * other clock, and so a spread of values is possible. This is not a * problem for the computation of the nanosecond clock; with TSC rates * around 1GHZ, there can only be a few cycles which correspond to one * nanosecond value, and any path through this code will inevitably * take longer than that. However, with the kernel_ns value itself, * the precision may be much lower, down to HZ granularity. If the * first sampling of TSC against kernel_ns ends in the low part of the * range, and the second in the high end of the range, we can get: * * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new * * As the sampling errors potentially range in the thousands of cycles, * it is possible such a time value has already been observed by the * guest. To protect against this, we must compute the system time as * observed by the guest and ensure the new system time is greater. 
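 *
 * The code below therefore computes max_kernel_ns: the system time the
 * guest could already have derived from its last observed TSC value,
 * scaled with the previous tsc_to_system_mul/tsc_shift parameters and
 * added to last_kernel_ns. kernel_ns is then raised to at least that
 * value before the new pvclock parameters are published.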
*/ max_kernel_ns = 0; if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, vcpu->hv_clock.tsc_to_system_mul, vcpu->hv_clock.tsc_shift); max_kernel_ns += vcpu->last_kernel_ns; } if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } if (max_kernel_ns > kernel_ns) kernel_ns = max_kernel_ns; /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_kernel_ns = kernel_ns; vcpu->last_guest_tsc = tsc_timestamp; vcpu->hv_clock.flags = 0; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. */ vcpu->hv_clock.version += 2; shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); kunmap_atomic(shared_kaddr, KM_USER0); mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); return 0; } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ return valid_mtrr_type(data & 0xff); } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pt = data; } kvm_mmu_reset_context(vcpu); return 0; } static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 
data) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_MCG_STATUS: vcpu->arch.mcg_status = data; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; if (data != 0 && data != ~(u64)0) return -1; vcpu->arch.mcg_ctl = data; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore * this to avoid an uncatched #GP in the guest */ if ((offset & 0x3) == 0 && data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; } return 1; } return 0; } static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; } default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 
0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are resrved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { bool pr = false; switch (msr) { case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ if (data != 0) { pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_AMD64_NB_CFG: break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: kvm_set_apic_base(vcpu, data); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... 
*/ if (!(data & 1)) break; /* ...but clean it before doing the actual write */ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); if (is_error_page(vcpu->arch.time_page)) { kvm_release_page_clean(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS)) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr, data); if (pr || data != 0) pr_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to speicify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. */ pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr, data); if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
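 *
 * This simply dispatches to the vendor implementation through
 * kvm_x86_ops->get_msr(); MSRs that are not vendor specific are
 * expected to end up in kvm_get_msr_common() further below.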
*/ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) if (v == vcpu) data = r; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_PERFCTR0: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 
0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. 
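 *
 * The entry array is brought in with memdup_user(); when @writeback is
 * set (the KVM_GET_MSRS path) the possibly updated entries are copied
 * back to userspace afterwards. A failing do_msr() callback stops the
 * loop early, and the number of MSRs processed so far is returned to
 * the caller rather than an error code.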
*/ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_MEMORY_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r 
= -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return vcpu->kvm->arch.iommu_domain && !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { /* Make sure TSC doesn't go backwards */ s64 tsc_delta; u64 tsc; tsc = kvm_x86_ops->read_l1_tsc(vcpu); tsc_delta = !vcpu->arch.last_guest_tsc ? 0 : tsc - vcpu->arch.last_guest_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta); vcpu->arch.tsc_catchup = 1; } kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); kvm_apic_post_state_restore(vcpu); update_cr8_intercept(vcpu); return 0; } static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq < 0 || irq->irq >= 256) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && 
banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) vcpu->arch.sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu 
*vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, xstate_size); else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, xstate_size); else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[0].value); break; } if (r) r = -EINVAL; return r; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) { r = PTR_ERR(u.lapic); goto out; } r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); if (r) goto out; r = 0; break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); if (r) goto out; r = 0; break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); if (r) goto out; r = 0; break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; 
break; } case KVM_GET_MSRS: r = msr_io(vcpu, argp, kvm_get_msr, 1); break; case KVM_SET_MSRS: r = msr_io(vcpu, argp, do_set_msr, 0); break; case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; r = -EFAULT; if (copy_from_user(&tac, argp, sizeof tac)) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &tac, sizeof tac)) goto out; r = 0; break; }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; r = -EINVAL; if (!irqchip_in_kernel(vcpu->kvm)) goto out; r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; r = 0; kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { u64 mcg_cap; r = -EFAULT; if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; } case KVM_X86_SET_MCE: { struct kvm_x86_mce mce; r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; } case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); r = -EFAULT; if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) break; r = 0; break; } case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; r = -EFAULT; if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) break; r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); break; } case KVM_GET_DEBUGREGS: { struct kvm_debugregs dbgregs; kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); r = -EFAULT; if (copy_to_user(argp, &dbgregs, sizeof(struct kvm_debugregs))) break; r = 0; break; } case KVM_SET_DEBUGREGS: { struct kvm_debugregs dbgregs; r = -EFAULT; if (copy_from_user(&dbgregs, argp, sizeof(struct kvm_debugregs))) break; r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } case KVM_GET_XSAVE: { u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); if (IS_ERR(u.xsave)) { r = PTR_ERR(u.xsave); goto out; } r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); if (IS_ERR(u.xcrs)) { r = PTR_ERR(u.xcrs); goto out; } r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } case KVM_SET_TSC_KHZ: { u32 user_tsc_khz; r = -EINVAL; if (!kvm_has_tsc_control) break; user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { r = -EIO; if (check_tsc_unstable()) goto out; r = vcpu_tsc_khz(vcpu); goto out; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -1; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 
return -EINVAL; mutex_lock(&kvm->slots_lock); spin_lock(&kvm->mmu_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; spin_unlock(&kvm->mmu_lock); mutex_unlock(&kvm->slots_lock); return 0; } static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /** * write_protect_slot - write protect a slot for dirty logging * @kvm: the kvm instance * @memslot: the slot we protect * @dirty_bitmap: the bitmap indicating which pages are dirty * 
@nr_dirty_pages: the number of dirty pages * * We have two ways to find all sptes to protect: * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and * checks ones that have a spte mapping a page in the slot. * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap. * * Generally speaking, if there are not so many dirty pages compared to the * number of shadow pages, we should use the latter. * * Note that letting others write into a page marked dirty in the old bitmap * by using the remaining tlb entry is not a problem. That page will become * write protected again when we flush the tlb and then be reported dirty to * the user space by copying the old bitmap. */ static void write_protect_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *dirty_bitmap, unsigned long nr_dirty_pages) { spin_lock(&kvm->mmu_lock); /* Not many dirty pages compared to # of shadow pages. */ if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) { unsigned long gfn_offset; for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) { unsigned long gfn = memslot->base_gfn + gfn_offset; kvm_mmu_rmap_write_protect(kvm, gfn, memslot); } kvm_flush_remote_tlbs(kvm); } else kvm_mmu_slot_remove_write_access(kvm, memslot->id); spin_unlock(&kvm->mmu_lock); } /* * Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; struct kvm_memory_slot *memslot; unsigned long n, nr_dirty_pages; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); nr_dirty_pages = memslot->nr_dirty_pages; /* If nothing is dirty, don't bother messing with page tables. */ if (nr_dirty_pages) { struct kvm_memslots *slots, *old_slots; unsigned long *dirty_bitmap, *dirty_bitmap_head; dirty_bitmap = memslot->dirty_bitmap; dirty_bitmap_head = memslot->dirty_bitmap_head; if (dirty_bitmap == dirty_bitmap_head) dirty_bitmap_head += n / sizeof(long); memset(dirty_bitmap_head, 0, n); r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL); if (!slots) goto out; memslot = id_to_memslot(slots, log->slot); memslot->nr_dirty_pages = 0; memslot->dirty_bitmap = dirty_bitmap_head; update_memslots(slots, NULL); old_slots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); kfree(old_slots); write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) goto out; } else { r = -EFAULT; if (clear_user(log->dirty_bitmap, n)) goto out; } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. 
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); if (r < 0) goto out; break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); if (r < 0) goto out; break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); if (r) goto out; break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: 
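	/*
	 * KVM_SET_PIT uses the older struct kvm_pit_state layout;
	 * KVM_SET_PIT2 below additionally carries the flags word
	 * (e.g. KVM_PIT_FLAGS_HPET_LEGACY).
	 */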
{ r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); if (r) goto out; r = 0; break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); if (r) goto out; r = 0; break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); if (r) goto out; r = 0; break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->set_segment(vcpu, var, seg); } void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { kvm_x86_ops->get_segment(vcpu, var, seg); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { gpa_t t_gpa; struct x86_exception exception; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception); return t_gpa; } gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access | PFERR_FETCH_MASK, exception); } int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; if (vcpu_match_mmio_gva(vcpu, gva) && check_write_user_access(vcpu, write, access, vcpu->arch.access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } if (write) access |= PFERR_WRITE_MASK; *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); return 1; } struct read_write_emulator_ops { int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, int bytes); int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val); int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); bool write; }; static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_phys_addr, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); } static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); } static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); return X86EMUL_IO_NEEDED; } static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { memcpy(vcpu->mmio_data, val, bytes); memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8); return X86EMUL_CONTINUE; } static struct read_write_emulator_ops read_emultor = { .read_write_prepare = read_prepare, .read_write_emulate = read_emulate, .read_write_mmio = vcpu_mmio_read, .read_write_exit_mmio = read_exit_mmio, }; static struct read_write_emulator_ops write_emultor = { .read_write_emulate = write_emulate, .read_write_mmio = write_mmio, .read_write_exit_mmio = write_exit_mmio, .write = true, }; static int emulator_read_write_onepage(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu, struct read_write_emulator_ops *ops) { gpa_t gpa; int handled, ret; bool write = ops->write; if (ops->read_write_prepare && ops->read_write_prepare(vcpu, val, bytes)) return X86EMUL_CONTINUE; ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if (ret) goto mmio; if (ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? 
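	 * The local APIC and the in-kernel devices on the MMIO bus get the
	 * first chance; whatever part of the access they do not handle is
	 * turned into a KVM_EXIT_MMIO exit to userspace below.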
*/ handled = ops->read_write_mmio(vcpu, gpa, bytes, val); if (handled == bytes) return X86EMUL_CONTINUE; gpa += handled; bytes -= handled; val += handled; vcpu->mmio_needed = 1; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa; vcpu->mmio_size = bytes; vcpu->run->mmio.len = min(vcpu->mmio_size, 8); vcpu->run->mmio.is_write = vcpu->mmio_is_write = write; vcpu->mmio_index = 0; return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); } int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct read_write_emulator_ops *ops) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); /* Crossing a page boundary? */ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int rc, now; now = -addr & ~PAGE_MASK; rc = emulator_read_write_onepage(addr, val, now, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } return emulator_read_write_onepage(addr, val, bytes, exception, vcpu, ops); } static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, val, bytes, exception, &read_emultor); } int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, (void *)val, bytes, exception, &write_emultor); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* guests cmpxchg8b have to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) { kvm_release_page_clean(page); goto emul_write; } kaddr = kmap_atomic(page, KM_USER0); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr, KM_USER0); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; kvm_mmu_pte_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(ctxt, addr, new, bytes, exception); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); 
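	/*
	 * A return value of zero means an in-kernel device claimed the
	 * transaction; anything else makes the caller bounce the PIO
	 * access out to userspace.
	 */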
return r; } static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, unsigned short port, void *val, unsigned int count, bool in) { trace_kvm_pio(!in, port, size, count); vcpu->arch.pio.port = port; vcpu->arch.pio.in = in; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int ret; if (vcpu->arch.pio.count) goto data_avail; ret = emulator_pio_in_out(vcpu, size, port, val, count, true); if (ret) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); vcpu->arch.pio.count = 0; return 1; } return 0; } static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); memcpy(vcpu->arch.pio_data, val, size * count); return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) { kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); } int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 new_val) { return (curr_cr & ~((1ULL << 32) - 1)) | new_val; } static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long value; switch (cr) { case 0: value = kvm_read_cr0(vcpu); break; case 2: value = vcpu->arch.cr2; break; case 3: value = kvm_read_cr3(vcpu); break; case 4: value = kvm_read_cr4(vcpu); break; case 8: value = kvm_get_cr8(vcpu); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); return 0; } return value; } static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int res = 0; switch (cr) { case 0: res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); break; case 2: vcpu->arch.cr2 = val; break; case 3: res = kvm_set_cr3(vcpu, val); break; case 4: res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); break; case 8: res = kvm_set_cr8(vcpu, val); break; default: vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); res = -1; } return res; } static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { return 
kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); } static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); } static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); } static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) { kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); } static unsigned long emulator_get_cached_segment_base( struct x86_emulate_ctxt *ctxt, int seg) { return get_segment_base(emul_to_vcpu(ctxt), seg); } static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg) { struct kvm_segment var; kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; if (var.unusable) return false; if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); #ifdef CONFIG_X86_64 if (base3) *base3 = var.base >> 32; #endif desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_segment var; var.selector = selector; var.base = get_desc_base(desc); #ifdef CONFIG_X86_64 var.base |= ((u64)base3) << 32; #endif var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.present = desc->p; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata) { return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data) { return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata) { return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); } static void emulator_halt(struct x86_emulate_ctxt *ctxt) { emul_to_vcpu(ctxt)->arch.halt_request = 1; } static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) { preempt_disable(); kvm_load_guest_fpu(emul_to_vcpu(ctxt)); /* * CR0.TS may reference the host fpu state, not the guest fpu state, * so it may be clear at this point. 
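	 * Clearing TS below keeps emulated FPU/MMX instructions from
	 * faulting with #NM while the guest FPU state is loaded.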
*/
	clts();
}

static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_enable();
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	struct kvm_cpuid_entry2 *cpuid = NULL;

	if (eax && ecx)
		cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
					     *eax, *ecx);

	if (cpuid) {
		*eax = cpuid->eax;
		*ecx = cpuid->ecx;
		if (ebx)
			*ebx = cpuid->ebx;
		if (edx)
			*edx = cpuid->edx;
		return true;
	}

	return false;
}

static struct x86_emulate_ops emulate_ops = {
	.read_std            = kvm_read_guest_virt_system,
	.write_std           = kvm_write_guest_virt_system,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt	     = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt	     = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.get_fpu             = emulator_get_fpu,
	.put_fpu             = emulator_put_fpu,
	.intercept           = emulator_intercept,
	.get_cpuid           = emulator_get_cpuid,
};

static void cache_all_regs(struct kvm_vcpu *vcpu)
{
	kvm_register_read(vcpu, VCPU_REGS_RAX);
	kvm_register_read(vcpu, VCPU_REGS_RSP);
	kvm_register_read(vcpu, VCPU_REGS_RIP);
	vcpu->arch.regs_dirty = ~0;
}

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
	/*
	 * An sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss.
	 */
	if (!(int_shadow & mask))
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
}

static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	if (ctxt->exception.vector == PF_VECTOR)
		kvm_propagate_fault(vcpu, &ctxt->exception);
	else if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
}

static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
			      const unsigned long *regs)
{
	memset(&ctxt->twobyte, 0,
	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
	memcpy(ctxt->regs, regs, sizeof(ctxt->regs));

	ctxt->fetch.start = 0;
	ctxt->fetch.end = 0;
	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.pos = 0;
	ctxt->mem_read.end = 0;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	/*
	 * TODO: fix emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses, can save a hundred cycles
	 * on Intel for instructions that don't read/change RSP,
	 * for example.
*/
	cache_all_regs(vcpu);

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     cs_l				? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	ctxt->guest_mode = is_guest_mode(vcpu);

	init_decode_cache(ctxt, vcpu->arch.regs);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

	ctxt->eip = ctxt->_eip;
	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);

	if (irq == NMI_VECTOR)
		vcpu->arch.nmi_pending = 0;
	else
		vcpu->arch.interrupt.pending = false;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
	if (!is_guest_mode(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;

	if (tdp_enabled)
		return false;

	/*
	 * If emulation was due to access to a shadowed page table and it
	 * failed, try to unshadow the page and re-enter the guest to let
	 * the CPU execute the instruction.
	 */
	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
		return true;

	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);

	if (gpa == UNMAPPED_GVA)
		return true; /* let cpu generate fault */

	if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
		return true;

	return false;
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2,  int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is a non-page-table-
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protection; we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop. So, we cache the
	 * last retried eip and the last fault address; if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
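	 *
	 * In short: we retry at most once for a given (eip, fault address)
	 * pair; seeing the same pair again falls through to normal
	 * emulation.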
*/ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; if (!(emulation_type & EMULTYPE_RETRY)) return false; if (x86_page_table_writing_insn(ctxt)) return false; if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) return false; vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; if (!vcpu->arch.mmu.direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); return true; } int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->perm_ok = false; ctxt->only_vendor_specific_insn = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, ctxt->_eip); return EMULATE_DONE; } if (retry_instruction(ctxt, cr2, emulation_type)) return EMULATE_DONE; /* this is needed for vmware backdoor interface to work since it changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs); } restart: r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } if (ctxt->have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) vcpu->arch.pio.count = 0; else writeback = false; r = EMULATE_DO_MMIO; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; r = EMULATE_DO_MMIO; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; if (writeback) { toggle_interruptibility(vcpu, ctxt->interruptibility); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; return r; } EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); /* do not return to emulator after return from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __this_cpu_write(cpu_tsc_khz, 0); } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __this_cpu_write(cpu_tsc_khz, khz); } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs 
*freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 *
	 */

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != smp_processor_id())
				send_ipi = 1;
		}
	}
	raw_spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We are scaling the frequency up.  We must make sure the
		 * guest does not see old kvmclock values while running with
		 * the new frequency; otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
*/
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_notifier(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, tsc_bad, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvmclock_cpu_notifier_block = {
	.notifier_call  = kvmclock_cpu_notifier,
	.priority = -INT_MAX
};

static void kvm_timer_init(void)
{
	int cpu;

	max_tsc_khz = tsc_khz;
	register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
}

static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);

int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);

void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate a page fault with PFERR.RSV = 1.
	 */
	mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
	mask |= 1ull;

#ifdef CONFIG_X86_64
	/*
	 * If the reserved bit is not supported, clear the present bit to
	 * disable MMIO page faults.
*/ if (maxphyaddr == 52) mask &= ~1ull; #endif kvm_mmu_set_mmio_spte_mask(mask); } int kvm_arch_init(void *opaque) { int r; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); r = -EEXIST; goto out; } if (!ops->cpu_has_kvm_support()) { printk(KERN_ERR "kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { printk(KERN_ERR "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } r = kvm_mmu_module_init(); if (r) goto out; kvm_set_mmio_spte_mask(); kvm_init_msr_list(); kvm_x86_ops = ops; kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); kvm_timer_init(); perf_register_guest_info_callbacks(&kvm_guest_cbs); if (cpu_has_xsave) host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); return 0; out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); kvm_x86_ops = NULL; kvm_mmu_module_exit(); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; int cs_db, cs_l; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) & 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); if (!is_long_mode(vcpu)) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 
0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; default: ret = -KVM_ENOSYS; break; } out: kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); /* * Blow out the MMU to ensure that no other VCPU has an active mapping * to ensure that the updated hypercall appears atomically across all * VCPUs. */ kvm_mmu_zap_all(vcpu->kvm); kvm_x86_ops->patch_hypercall(vcpu, instruction); return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } /* * Check if userspace requested an interrupt window, and that the * interrupt window is open. * * No need to exit to userspace if we already have an interrupt queued. */ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) { return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && vcpu->run->request_interrupt_window && kvm_arch_interrupt_allowed(vcpu)); } static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); } static void vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) return; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); vcpu->arch.apic->vapic_page = page; } static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); } static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; if (!kvm_x86_ops->update_cr8_intercept) return; if (!vcpu->arch.apic) return; if (!vcpu->arch.apic->vapic_addr) max_irr = kvm_lapic_find_highest_irr(vcpu); else max_irr = -1; if (max_irr != -1) max_irr >>= 4; tpr = kvm_lapic_get_cr8(vcpu); kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } static void inject_pending_event(struct kvm_vcpu *vcpu) { /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_interrupt(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); 
} } } static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); vcpu->guest_xcr0_loaded = 0; } } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). */ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = 0; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. Do synthetic halt */ vcpu->arch.apf.halted = true; r = 1; goto out; } if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) record_steal_time(vcpu); if (kvm_check_request(KVM_REQ_NMI, vcpu)) process_nmi(vcpu); req_immediate_exit = kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu); if (kvm_check_request(KVM_REQ_PMU, vcpu)) kvm_handle_pmu_event(vcpu); if (kvm_check_request(KVM_REQ_PMI, vcpu)) kvm_deliver_pmi(vcpu); } r = kvm_mmu_reload(vcpu); if (unlikely(r)) goto out; if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { inject_pending_event(vcpu); /* enable NMI/IRQ window open exits if needed */ if (vcpu->arch.nmi_pending) kvm_x86_ops->enable_nmi_window(vcpu); else if (kvm_cpu_has_interrupt(vcpu) || req_int_win) kvm_x86_ops->enable_irq_window(vcpu); if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); kvm_lapic_sync_to_vapic(vcpu); } } preempt_disable(); kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); kvm_load_guest_xcr0(vcpu); vcpu->mode = IN_GUEST_MODE; /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
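	 * The smp_mb() below keeps the store to ->mode ordered before the
	 * ->requests check on this CPU.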
*/ smp_mb(); local_irq_disable(); if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests || need_resched() || signal_pending(current)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); preempt_enable(); kvm_x86_ops->cancel_injection(vcpu); r = 1; goto out; } srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); } trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu); /* * If the guest has used debug registers, at least dr7 * will be disabled while returning to the host. * If we don't have active breakpoints in the host, we don't * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ if (hw_breakpoint_active()) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); local_irq_enable(); ++vcpu->stat.exits; /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. * But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); /* * Profile KVM exit RIPs: */ if (unlikely(prof_on == KVM_PROFILING)) { unsigned long rip = kvm_rip_read(vcpu); profile_hit(KVM_PROFILING, (void *)rip); } kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); out: return r; } static int __vcpu_run(struct kvm_vcpu *vcpu) { int r; struct kvm *kvm = vcpu->kvm; if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, vcpu->arch.sipi_vector); kvm_lapic_reset(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r) return r; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vapic_enter(vcpu); r = 1; while (r > 0) { if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) r = vcpu_enter_guest(vcpu); else { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; case KVM_MP_STATE_SIPI_RECEIVED: default: r = -EINTR; break; } } } if (r <= 0) break; clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); if (dm_request_for_irq_injection(vcpu)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.request_irq_exits; } kvm_check_async_pf_completion(vcpu); if (signal_pending(current)) { r = -EINTR; vcpu->run->exit_reason = KVM_EXIT_INTR; ++vcpu->stat.signal_exits; } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_resched(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); vapic_exit(vcpu); return r; } static int complete_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; int r; if (!(vcpu->arch.pio.count || vcpu->mmio_needed)) return 1; if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; if 
(!vcpu->mmio_is_write) memcpy(vcpu->mmio_data + vcpu->mmio_index, run->mmio.data, 8); vcpu->mmio_index += 8; if (vcpu->mmio_index < vcpu->mmio_size) { run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index; memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8); run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8); run->mmio.is_write = vcpu->mmio_is_write; vcpu->mmio_needed = 1; return 0; } if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; } vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } r = complete_mmio(vcpu); if (r <= 0) goto out; r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Register state needs to be copied * back from emulation context to vcpu. Userspace shouldn't do * that usually, but some badly designed PV devices (vmware * backdoor interface) need this to work */ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); #ifdef CONFIG_X86_64 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); #endif regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); #endif kvm_rip_write(vcpu, regs->rip); kvm_set_rflags(vcpu, regs->rflags); vcpu->arch.exception.pending = false; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); *db = cs.db; *l = cs.l; } EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct desc_ptr dt; kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); kvm_x86_ops->get_idt(vcpu, &dt); sregs->idt.limit = dt.size; sregs->idt.base = dt.address; kvm_x86_ops->get_gdt(vcpu, &dt); sregs->gdt.limit = dt.size; sregs->gdt.base = dt.address; sregs->cr0 = kvm_read_cr0(vcpu); sregs->cr2 = vcpu->arch.cr2; sregs->cr3 = kvm_read_cr3(vcpu); sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ret = emulator_task_switch(ctxt, tss_selector, reason, has_error_code, error_code); if (ret) return EMULATE_FAIL; memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); dt.size = sregs->gdt.limit; dt.address = sregs->gdt.base; kvm_x86_ops->set_gdt(vcpu, &dt); vcpu->arch.cr2 = sregs->cr2; mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_set_cr8(vcpu, sregs->cr8); mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); kvm_set_apic_base(vcpu, sregs->apic_base); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 
kvm_x86_ops->set_cr0(vcpu, sregs->cr0); vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_ops->set_cr4(vcpu, sregs->cr4); if (sregs->cr4 & X86_CR4_OSXSAVE) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); if (!is_long_mode(vcpu) && is_pae(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); if (mmu_reset_needed) kvm_mmu_reset_context(vcpu); max_bits = (sizeof sregs->interrupt_bitmap) << 3; pending_vec = find_first_bit( (const unsigned long *)sregs->interrupt_bitmap, max_bits); if (pending_vec < max_bits) { kvm_queue_interrupt(vcpu, pending_vec, false); pr_debug("Set back pending irq %d\n", pending_vec); } kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); update_cr8_intercept(vcpu); /* Older userspace won't unhalt the vcpu on reset. */ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && !is_protmode(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { unsigned long rflags; int i, r; if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) goto out; if (dbg->control & KVM_GUESTDBG_INJECT_DB) kvm_queue_exception(vcpu, DB_VECTOR); else kvm_queue_exception(vcpu, BP_VECTOR); } /* * Read rflags as long as potentially injected trace flags are still * filtered out. */ rflags = kvm_get_rflags(vcpu); vcpu->guest_debug = dbg->control; if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { for (i = 0; i < KVM_NR_DB_REGS; ++i) vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; vcpu->arch.switch_db_regs = (dbg->arch.debugreg[7] & DR7_BP_EN_MASK); } else { for (i = 0; i < KVM_NR_DB_REGS; i++) vcpu->arch.eff_db[i] = vcpu->arch.db[i]; vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); } if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); /* * Trigger an rflags update that will inject or remove the trace * flags. */ kvm_set_rflags(vcpu, rflags); kvm_x86_ops->set_guest_debug(vcpu, dbg); r = 0; out: return r; } /* * Translate a guest virtual address to a guest physical address. 
*/ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { unsigned long vaddr = tr->linear_address; gpa_t gpa; int idx; idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); tr->physical_address = gpa; tr->valid = gpa != UNMAPPED_GVA; tr->writeable = 1; tr->usermode = 0; return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; fpu->ftwx = fxsave->twd; fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { struct i387_fxsave_struct *fxsave = &vcpu->arch.guest_fpu.state->fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; fxsave->swd = fpu->fsw; fxsave->twd = fpu->ftwx; fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); return 0; } int fx_init(struct kvm_vcpu *vcpu) { int err; err = fpu_alloc(&vcpu->arch.guest_fpu); if (err) return err; fpu_finit(&vcpu->arch.guest_fpu); /* * Ensure guest xcr0 is valid for loading */ vcpu->arch.xcr0 = XSTATE_FP; vcpu->arch.cr0 |= X86_CR0_ET; return 0; } EXPORT_SYMBOL_GPL(fx_init); static void fx_free(struct kvm_vcpu *vcpu) { fpu_free(&vcpu->arch.guest_fpu); } void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { if (vcpu->guest_fpu_loaded) return; /* * Restore all possible states in the guest, * and assume host would use all available bits. * Guest xcr0 would be loaded later. 
*/ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; unlazy_fpu(current); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { kvm_put_guest_xcr0(vcpu); if (!vcpu->guest_fpu_loaded) return; vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvmclock_reset(vcpu); free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); return kvm_x86_ops->vcpu_create(kvm, id); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int r; vcpu->arch.mtrr_state.have_fixed = 1; vcpu_load(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { vcpu->arch.apf.msr_val = 0; vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); fx_free(vcpu); kvm_x86_ops->vcpu_free(vcpu); } int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; vcpu->arch.switch_db_regs = 0; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); return kvm_x86_ops->vcpu_reset(vcpu); } int kvm_arch_hardware_enable(void *garbage) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; kvm_shared_msr_cpu_online(); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->cpu == smp_processor_id()) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return kvm_x86_ops->hardware_enable(garbage); } void kvm_arch_hardware_disable(void *garbage) { kvm_x86_ops->hardware_disable(garbage); drop_user_return_notifiers(garbage); } int kvm_arch_hardware_setup(void) { return kvm_x86_ops->hardware_setup(); } void kvm_arch_hardware_unsetup(void) { kvm_x86_ops->hardware_unsetup(); } void kvm_arch_check_processor_compat(void *rtn) { kvm_x86_ops->check_processor_compatibility(rtn); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; struct kvm *kvm; int r; BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) { r = -ENOMEM; goto fail; } vcpu->arch.pio_data = page_address(page); kvm_init_tsc_catchup(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) goto fail_free_pio_data; if (irqchip_in_kernel(kvm)) { r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; } vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL); if (!vcpu->arch.mce_banks) { r = -ENOMEM; goto fail_free_lapic; } vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); return 0; 
fail_free_mce_banks: kfree(vcpu->arch.mce_banks); fail_free_lapic: kvm_free_lapic(vcpu); fail_mmu_destroy: kvm_mmu_destroy(vcpu); fail_free_pio_data: free_page((unsigned long)vcpu->arch.pio_data); fail: return r; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { int idx; kvm_pmu_destroy(vcpu); kfree(vcpu->arch.mce_banks); kvm_free_lapic(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_mmu_destroy(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); free_page((unsigned long)vcpu->arch.pio_data); } int kvm_arch_init_vm(struct kvm *kvm) { INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); raw_spin_lock_init(&kvm->arch.tsc_write_lock); return 0; } static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } static void kvm_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* * Unpin any mmu pages first. */ kvm_for_each_vcpu(i, vcpu, kvm) { kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) kvm_arch_vcpu_free(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } void kvm_arch_sync_events(struct kvm *kvm) { kvm_free_all_assigned_devices(kvm); kvm_free_pit(kvm); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); kvm_free_vcpus(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { int npages = memslot->npages; int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; /* Prevent internal slot pages from being moved by fork()/COW. */ if (memslot->id >= KVM_MEMORY_SLOTS) map_flags = MAP_SHARED | MAP_ANONYMOUS; /* To keep backward compatibility with older userspace, * x86 needs to handle !user_alloc case.
*/ if (!user_alloc) { if (npages && !old.rmap) { unsigned long userspace_addr; down_write(&current->mm->mmap_sem); userspace_addr = do_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, map_flags, 0); up_write(&current->mm->mmap_sem); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); memslot->userspace_addr = userspace_addr; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT; if (!user_alloc && !old.user_alloc && old.rmap && !npages) { int ret; down_write(&current->mm->mmap_sem); ret = do_munmap(current->mm, old.userspace_addr, old.npages * PAGE_SIZE); up_write(&current->mm->mmap_sem); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " "failed to munmap memory\n"); } if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); spin_lock(&kvm->mmu_lock); if (nr_mmu_pages) kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); kvm_mmu_slot_remove_write_access(kvm, mem->slot); spin_unlock(&kvm->mmu_lock); } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_mmu_zap_all(kvm); kvm_reload_remote_mmus(kvm); } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED || atomic_read(&vcpu->arch.nmi_queued) || (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)); } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) { wake_up_interruptible(&vcpu->wq); ++vcpu->stat.halt_wakeup; } me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE) smp_send_reschedule(cpu); put_cpu(); } int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) { return kvm_x86_ops->interrupt_allowed(vcpu); } bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) { unsigned long current_rip = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); return current_rip == linear_rip; } EXPORT_SYMBOL_GPL(kvm_is_linear_rip); unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags; rflags = kvm_x86_ops->get_rflags(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) rflags &= ~X86_EFLAGS_TF; return rflags; } EXPORT_SYMBOL_GPL(kvm_get_rflags); void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) rflags |= X86_EFLAGS_TF; kvm_x86_ops->set_rflags(vcpu, rflags); kvm_make_request(KVM_REQ_EVENT, vcpu); } EXPORT_SYMBOL_GPL(kvm_set_rflags); void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || is_error_page(work->page)) return; r = kvm_mmu_reload(vcpu); if (unlikely(r)) return; if (!vcpu->arch.mmu.direct_map && work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) return; vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) { return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); } static inline u32 kvm_async_pf_next_probe(u32 key) { return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1); } static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 key = kvm_async_pf_hash_fn(gfn); 
while (vcpu->arch.apf.gfns[key] != ~0) key = kvm_async_pf_next_probe(key); vcpu->arch.apf.gfns[key] = gfn; } static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) { int i; u32 key = kvm_async_pf_hash_fn(gfn); for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) && (vcpu->arch.apf.gfns[key] != gfn && vcpu->arch.apf.gfns[key] != ~0); i++) key = kvm_async_pf_next_probe(key); return key; } bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; } static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { u32 i, j, k; i = j = kvm_async_pf_gfn_slot(vcpu, gfn); while (true) { vcpu->arch.apf.gfns[i] = ~0; do { j = kvm_async_pf_next_probe(j); if (vcpu->arch.apf.gfns[j] == ~0) return; k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); /* * k lies cyclically in ]i,j] * | i.k.j | * |....j i.k.| or |.k..j i...| */ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; i = j; } } static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) { return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, sizeof(val)); } void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_add_async_pf_gfn(vcpu, work->arch.gfn); if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) == 0)) kvm_make_request(KVM_REQ_APF_HALT, vcpu); else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } } void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); if (is_error_page(work->page)) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; fault.error_code = 0; fault.nested_page_fault = false; fault.address = work->arch.token; kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) return true; else return !kvm_event_needs_reinjection(vcpu) && kvm_x86_ops->interrupt_allowed(vcpu); } EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3622_1
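The async page-fault bookkeeping in the x86.c record above (kvm_add_async_pf_gfn, kvm_async_pf_gfn_slot and kvm_del_async_pf_gfn) is an open-addressed hash table with linear probing and tombstone-free deletion: when an entry is removed, later entries in the probe chain are slid back so lookups never stop early at a hole. The user-space sketch below models that probing and deletion logic so it can be compiled and tested on its own; the table size, the hash function and the EMPTY sentinel are illustrative stand-ins rather than the kernel's values, and the not-present guard in del_gfn is an extra safety check that the kernel does not need.

/* Minimal user-space model of the open-addressed gfn table used by the
 * async page-fault code above. Linear probing; deletion re-slots the
 * entries that follow so probe chains stay intact (no tombstones).
 * Table size and hash function are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdint.h>

#define TABLE_BITS 6
#define TABLE_SIZE (1u << TABLE_BITS)
#define EMPTY      (~(uint64_t)0)

static uint64_t table[TABLE_SIZE];

static uint32_t hash_fn(uint64_t gfn)
{
	/* Any reasonable mixing function works for the illustration. */
	return (uint32_t)(gfn * 0x9E3779B97F4A7C15ull >> (64 - TABLE_BITS));
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (TABLE_SIZE - 1);
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	/* Linear probing: take the first free slot. */
	while (table[key] != EMPTY)
		key = next_probe(key);
	table[key] = gfn;
}

static uint32_t gfn_slot(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);
	uint32_t i;

	/* Walk the probe chain until we hit the gfn or an empty slot. */
	for (i = 0; i < TABLE_SIZE && table[key] != gfn && table[key] != EMPTY; i++)
		key = next_probe(key);
	return key;
}

static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = gfn_slot(gfn);
	if (table[i] != gfn)	/* not present; the kernel only deletes known gfns */
		return;
	while (1) {
		table[i] = EMPTY;
		do {
			j = next_probe(j);
			if (table[j] == EMPTY)
				return;
			k = hash_fn(table[j]);
			/* entries whose home slot k lies cyclically in ]i, j] can stay */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		/* this entry would become unreachable through slot i, so move it back */
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = EMPTY;

	add_gfn(0x1000);
	add_gfn(0x2000);
	add_gfn(0x3000);
	del_gfn(0x2000);

	printf("0x1000 present: %d\n", table[gfn_slot(0x1000)] == 0x1000);
	printf("0x2000 present: %d\n", table[gfn_slot(0x2000)] == 0x2000);
	printf("0x3000 present: %d\n", table[gfn_slot(0x3000)] == 0x3000);
	return 0;
}

Built with any C compiler, this should report 0x1000 and 0x3000 still present and 0x2000 absent after the deletion, which is exactly the invariant the back-shifting loop is there to preserve.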
crossvul-cpp_data_good_3486_13
/* * Performance event support - powerpc architecture code * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/reg.h> #include <asm/pmc.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> struct cpu_hw_events { int n_events; int n_percpu; int disabled; int n_added; int n_limited; u8 pmcs_enabled; struct perf_event *event[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int flags[MAX_HWEVENTS]; unsigned long mmcr[3]; struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned int group_flag; int n_txn_start; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); struct power_pmu *ppmu; /* * Normally, to ignore kernel events we set the FCS (freeze counters * in supervisor mode) bit in MMCR0, but if the kernel runs with the * hypervisor bit set in the MSR, or if we are running on a processor * where the hypervisor bit is forced to 1 (as on Apple G5 processors), * then we need to use the FCHV bit to ignore kernel events. */ static unsigned int freeze_events_kernel = MMCR0_FCS; /* * 32-bit doesn't have MMCRA but does have an MMCR2, * and a few other names are different. */ #ifdef CONFIG_PPC32 #define MMCR0_FCHV 0 #define MMCR0_PMCjCE MMCR0_PMCnCE #define SPRN_MMCRA SPRN_MMCR2 #define MMCRA_SAMPLE_ENABLE 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { return 0; } static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } static inline u32 perf_get_misc_flags(struct pt_regs *regs) { return 0; } static inline void perf_read_regs(struct pt_regs *regs) { } static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; } #endif /* CONFIG_PPC32 */ /* * Things that are specific to 64-bit implementations. */ #ifdef CONFIG_PPC64 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; if (slot > 1) return 4 * (slot - 1); } return 0; } /* * The user wants a data address recorded. * If we're not doing instruction sampling, give them the SDAR * (sampled data address). If we are doing instruction sampling, then * only give them the SDAR if it corresponds to the instruction * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC * bit in MMCRA. */ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { unsigned long mmcra = regs->dsisr; unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? 
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) *addrp = mfspr(SPRN_SDAR); } static inline u32 perf_get_misc_flags(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; unsigned long sihv = MMCRA_SIHV; unsigned long sipr = MMCRA_SIPR; if (TRAP(regs) != 0xf00) return 0; /* not a PMU interrupt */ if (ppmu->flags & PPMU_ALT_SIPR) { sihv = POWER6_MMCRA_SIHV; sipr = POWER6_MMCRA_SIPR; } /* PR has priority over HV, so order below is important */ if (mmcra & sipr) return PERF_RECORD_MISC_USER; if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) return PERF_RECORD_MISC_HYPERVISOR; return PERF_RECORD_MISC_KERNEL; } /* * Overload regs->dsisr to store MMCRA so we only need to read it once * on each interrupt. */ static inline void perf_read_regs(struct pt_regs *regs) { regs->dsisr = mfspr(SPRN_MMCRA); } /* * If interrupts were soft-disabled when a PMU interrupt occurs, treat * it as an NMI. */ static inline int perf_intr_is_nmi(struct pt_regs *regs) { return !regs->softe; } #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); void perf_event_print_debug(void) { } /* * Read one performance monitor counter (PMC). */ static unsigned long read_pmc(int idx) { unsigned long val; switch (idx) { case 1: val = mfspr(SPRN_PMC1); break; case 2: val = mfspr(SPRN_PMC2); break; case 3: val = mfspr(SPRN_PMC3); break; case 4: val = mfspr(SPRN_PMC4); break; case 5: val = mfspr(SPRN_PMC5); break; case 6: val = mfspr(SPRN_PMC6); break; #ifdef CONFIG_PPC64 case 7: val = mfspr(SPRN_PMC7); break; case 8: val = mfspr(SPRN_PMC8); break; #endif /* CONFIG_PPC64 */ default: printk(KERN_ERR "oops trying to read PMC%d\n", idx); val = 0; } return val; } /* * Write one PMC. */ static void write_pmc(int idx, unsigned long val) { switch (idx) { case 1: mtspr(SPRN_PMC1, val); break; case 2: mtspr(SPRN_PMC2, val); break; case 3: mtspr(SPRN_PMC3, val); break; case 4: mtspr(SPRN_PMC4, val); break; case 5: mtspr(SPRN_PMC5, val); break; case 6: mtspr(SPRN_PMC6, val); break; #ifdef CONFIG_PPC64 case 7: mtspr(SPRN_PMC7, val); break; case 8: mtspr(SPRN_PMC8, val); break; #endif /* CONFIG_PPC64 */ default: printk(KERN_ERR "oops trying to write PMC%d\n", idx); } } /* * Check if a set of events can all go on the PMU at once. * If they can't, this will look at alternative codes for the events * and see if any combination of alternative codes is feasible. * The feasible set is returned in event_id[]. 
*/ static int power_check_constraints(struct cpu_hw_events *cpuhw, u64 event_id[], unsigned int cflags[], int n_ev) { unsigned long mask, value, nv; unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; int i, j; unsigned long addf = ppmu->add_fields; unsigned long tadd = ppmu->test_adder; if (n_ev > ppmu->n_counter) return -1; /* First see if the events will go on as-is */ for (i = 0; i < n_ev; ++i) { if ((cflags[i] & PPMU_LIMITED_PMC_REQD) && !ppmu->limited_pmc_event(event_id[i])) { ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); event_id[i] = cpuhw->alternatives[i][0]; } if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], &cpuhw->avalues[i][0])) return -1; } value = mask = 0; for (i = 0; i < n_ev; ++i) { nv = (value | cpuhw->avalues[i][0]) + (value & cpuhw->avalues[i][0] & addf); if ((((nv + tadd) ^ value) & mask) != 0 || (((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0]) != 0) break; value = nv; mask |= cpuhw->amasks[i][0]; } if (i == n_ev) return 0; /* all OK */ /* doesn't work, gather alternatives... */ if (!ppmu->get_alternatives) return -1; for (i = 0; i < n_ev; ++i) { choice[i] = 0; n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); for (j = 1; j < n_alt[i]; ++j) ppmu->get_constraint(cpuhw->alternatives[i][j], &cpuhw->amasks[i][j], &cpuhw->avalues[i][j]); } /* enumerate all possibilities and see if any will work */ i = 0; j = -1; value = mask = nv = 0; while (i < n_ev) { if (j >= 0) { /* we're backtracking, restore context */ value = svalues[i]; mask = smasks[i]; j = choice[i]; } /* * See if any alternative k for event_id i, * where k > j, will satisfy the constraints. */ while (++j < n_alt[i]) { nv = (value | cpuhw->avalues[i][j]) + (value & cpuhw->avalues[i][j] & addf); if ((((nv + tadd) ^ value) & mask) == 0 && (((nv + tadd) ^ cpuhw->avalues[i][j]) & cpuhw->amasks[i][j]) == 0) break; } if (j >= n_alt[i]) { /* * No feasible alternative, backtrack * to event_id i-1 and continue enumerating its * alternatives from where we got up to. */ if (--i < 0) return -1; } else { /* * Found a feasible alternative for event_id i, * remember where we got up to with this event_id, * go on to the next event_id, and start with * the first alternative for it. */ choice[i] = j; svalues[i] = value; smasks[i] = mask; value = nv; mask |= cpuhw->amasks[i][j]; ++i; j = -1; } } /* OK, we have a feasible combination, tell the caller the solution */ for (i = 0; i < n_ev; ++i) event_id[i] = cpuhw->alternatives[i][choice[i]]; return 0; } /* * Check if newly-added events have consistent settings for * exclude_{user,kernel,hv} with each other and any previously * added events. 
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; int i, n, first; struct perf_event *event; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; ++i) { if (cflags[i] & PPMU_LIMITED_PMC_OK) { cflags[i] &= ~PPMU_LIMITED_PMC_REQD; continue; } event = ctrs[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } if (eu || ek || eh) for (i = 0; i < n; ++i) if (cflags[i] & PPMU_LIMITED_PMC_OK) cflags[i] |= PPMU_LIMITED_PMC_REQD; return 0; } static u64 check_and_compute_delta(u64 prev, u64 val) { u64 delta = (val - prev) & 0xfffffffful; /* * POWER7 can roll back counter values: if the new value is smaller * than the previous value it will cause the delta and the counter to * have bogus values unless we rolled a counter over. If a counter is * rolled back, it will be smaller, but within 256, which is the maximum * number of events to roll back at once. If we detect a rollback * return 0. This can lead to a small lack of precision in the * counters. */ if (prev > val && (prev - val) < 256) delta = 0; return delta; } static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; if (event->hw.state & PERF_HES_STOPPED) return; if (!event->hw.idx) return; /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. * Therefore we treat them like NMIs. */ do { prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); delta = check_and_compute_delta(prev, val); if (!delta) return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } /* * On some machines, PMC5 and PMC6 can't be written, don't respect * the freeze conditions, and don't generate interrupts. This tells * us if `event' is using such a PMC. */ static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); } static void freeze_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev, delta; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; if (!event->hw.idx) continue; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; delta = check_and_compute_delta(prev, val); if (delta) local64_add(delta, &event->count); } } static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); if (check_and_compute_delta(prev, val)) local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } /* * Since limited events don't respect the freeze conditions, we * have to read them immediately after freezing or unfreezing the * other events. We try to keep the values from the limited * events as consistent as possible by keeping the delay (in * cycles and instructions) between freezing/unfreezing and reading * the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them * both, and always in the same order, to minimize variability, * and do it inside the same asm that writes MMCR0. */ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) { unsigned long pmc5, pmc6; if (!cpuhw->n_limited) { mtspr(SPRN_MMCR0, mmcr0); return; } /* * Write MMCR0, then read PMC5 and PMC6 immediately. * To ensure we don't get a performance monitor interrupt * between writing MMCR0 and freezing/thawing the limited * events, we first write MMCR0 with the event overflow * interrupt enable bits turned off. */ asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" : "=&r" (pmc5), "=&r" (pmc6) : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), "i" (SPRN_MMCR0), "i" (SPRN_PMC5), "i" (SPRN_PMC6)); if (mmcr0 & MMCR0_FC) freeze_limited_counters(cpuhw, pmc5, pmc6); else thaw_limited_counters(cpuhw, pmc5, pmc6); /* * Write the full MMCR0 including the event overflow interrupt * enable bits, if necessary. */ if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) mtspr(SPRN_MMCR0, mmcr0); } /* * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. */ static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; cpuhw->n_added = 0; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } /* * Disable instruction sampling if it was enabled */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); } /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been * executed and the PMU has frozen the events * before we return. */ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); mb(); } local_irq_restore(flags); } /* * Re-enable all events if disable == 0. * If we were previously disabled and events were added, then * put the new config on the PMU. */ static void power_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; unsigned long flags; long i; unsigned long val; s64 left; unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { local_irq_restore(flags); return; } cpuhw->disabled = 0; /* * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings * (possibly updated for removal of events). */ if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); if (cpuhw->n_events == 0) ppc_set_pmu_inuse(0); goto out_enable; } /* * Compute MMCR* values for the new set of events */ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, cpuhw->mmcr)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; } /* * Add in MMCR0 freeze bits corresponding to the * attr.exclude_* bits for the first event. * We have already checked that all events have the * same values for these bits as the first event. 
*/ event = cpuhw->event[0]; if (event->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; if (event->attr.exclude_kernel) cpuhw->mmcr[0] |= freeze_events_kernel; if (event->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* * Write the new configuration to MMCR* with the freeze * bit set and set the hardware events to their initial values. * Then unfreeze the events. */ ppc_set_pmu_inuse(1); mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); /* * Read off any pre-existing events that need to move * to another PMC. */ for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_read(event); write_pmc(event->hw.idx, 0); event->hw.idx = 0; } } /* * Initialize the PMCs for all the new and moved events. */ cpuhw->n_limited = n_lim = 0; for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx) continue; idx = hwc_index[i] + 1; if (is_limited_pmc(idx)) { cpuhw->limited_counter[n_lim] = event; cpuhw->limited_hwidx[n_lim] = idx; ++n_lim; continue; } val = 0; if (event->hw.sample_period) { left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); event->hw.idx = idx; if (event->hw.state & PERF_HES_STOPPED) val = 0; write_pmc(idx, val); perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: mb(); write_mmcr0(cpuhw, cpuhw->mmcr[0]); /* * Enable instruction sampling if necessary */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mb(); mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); } out: local_irq_restore(flags); } static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[], u64 *events, unsigned int *flags) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; flags[n] = group->hw.event_base; events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; flags[n] = event->hw.event_base; events[n++] = event->hw.config; } } return n; } /* * Add a event to the PMU. * If all events are not already frozen, then we disable and * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. */ static int power_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; unsigned long flags; int n0; int ret = -EAGAIN; local_irq_save(flags); perf_pmu_disable(event->pmu); /* * Add the event to the list (if there is room) * and check whether the total set is still feasible. 
*/ cpuhw = &__get_cpu_var(cpu_hw_events); n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; cpuhw->event[n0] = event; cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; if (!(ef_flags & PERF_EF_START)) event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) goto nocheck; if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) goto out; event->hw.config = cpuhw->events[n0]; nocheck: ++cpuhw->n_events; ++cpuhw->n_added; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } /* * Remove a event from the PMU. */ static void power_pmu_del(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; long i; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); cpuhw = &__get_cpu_var(cpu_hw_events); for (i = 0; i < cpuhw->n_events; ++i) { if (event == cpuhw->event[i]) { while (++i < cpuhw->n_events) { cpuhw->event[i-1] = cpuhw->event[i]; cpuhw->events[i-1] = cpuhw->events[i]; cpuhw->flags[i-1] = cpuhw->flags[i]; } --cpuhw->n_events; ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); if (event->hw.idx) { write_pmc(event->hw.idx, 0); event->hw.idx = 0; } perf_event_update_userpage(event); break; } } for (i = 0; i < cpuhw->n_limited; ++i) if (event == cpuhw->limited_counter[i]) break; if (i < cpuhw->n_limited) { while (++i < cpuhw->n_limited) { cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; } --cpuhw->n_limited; } if (cpuhw->n_events == 0) { /* disable exceptions if no events are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * POWER-PMU does not support disabling individual counters, hence * program their cycle counter to their max value and ignore the interrupts. 
*/ static void power_pmu_start(struct perf_event *event, int ef_flags) { unsigned long flags; s64 left; if (!event->hw.idx || !event->hw.sample_period) return; if (!(event->hw.state & PERF_HES_STOPPED)) return; if (ef_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); local_irq_save(flags); perf_pmu_disable(event->pmu); event->hw.state = 0; left = local64_read(&event->hw.period_left); write_pmc(event->hw.idx, left); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void power_pmu_stop(struct perf_event *event, int ef_flags) { unsigned long flags; if (!event->hw.idx || !event->hw.sample_period) return; if (event->hw.state & PERF_HES_STOPPED) return; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; write_pmc(event->hw.idx, 0); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ void power_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; cpuhw->n_txn_start = cpuhw->n_events; } /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ void power_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); } /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 if success */ int power_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw; long i, n; if (!ppmu) return -EAGAIN; cpuhw = &__get_cpu_var(cpu_hw_events); n = cpuhw->n_events; if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) return -EAGAIN; i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); if (i < 0) return -EAGAIN; for (i = cpuhw->n_txn_start; i < n; ++i) cpuhw->event[i]->hw.config = cpuhw->events[i]; cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); return 0; } /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. * A event can only go on a limited PMC if it counts something * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. */ static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, unsigned int flags) { int n; u64 alt[MAX_EVENT_ALTERNATIVES]; if (event->attr.exclude_user || event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) return 1; /* * The requested event_id isn't on a limited PMC already; * see if any alternative code goes on a limited PMC. */ if (!ppmu->get_alternatives) return 0; flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; n = ppmu->get_alternatives(ev, flags, alt); return n > 0; } /* * Find an alternative event_id that goes on a normal PMC, if possible, * and return the event_id code, or 0 if there is no such alternative. * (Note: event_id code 0 is "don't count" on all machines.) 
*/ static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { u64 alt[MAX_EVENT_ALTERNATIVES]; int n; flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); n = ppmu->get_alternatives(ev, flags, alt); if (!n) return 0; return alt[0]; } /* Number of perf_events counting hardware events */ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); /* * Release the PMU if this is the last perf_event. */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } /* * Translate a generic cache event_id config to a raw event_id code. */ static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; } static int power_pmu_event_init(struct perf_event *event) { u64 ev; unsigned long flags; struct perf_event *ctrs[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int cflags[MAX_HWEVENTS]; int n; int err; struct cpu_hw_events *cpuhw; if (!ppmu) return -ENOENT; switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config_base = ev; event->hw.idx = 0; /* * If we are not running on a hypervisor, force the * exclude_hv bit to 0 so that we don't care what * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) event->attr.exclude_hv = 0; /* * If this is a per-task event, then we can use * PM_RUN_* events interchangeably with their non RUN_* * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. * XXX we should check if the task is an idle task. */ flags = 0; if (event->attach_state & PERF_ATTACH_TASK) flags |= PPMU_ONLY_COUNT_RUN; /* * If this machine has limited events, check whether this * event_id could go on a limited event. */ if (ppmu->flags & PPMU_LIMITED_PMC5_6) { if (can_go_on_limited_pmc(event, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { /* * The requested event_id is on a limited PMC, * but we can't use a limited PMC; see if any * alternative goes on a normal PMC. */ ev = normal_pmc_alternative(ev, flags); if (!ev) return -EINVAL; } } /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. 
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) return -EINVAL; } events[n] = ev; ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) return -EINVAL; cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); put_cpu_var(cpu_hw_events); if (err) return -EINVAL; event->hw.config = events[n]; event->hw.event_base = cflags[n]; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } event->destroy = hw_perf_event_destroy; return err; } struct pmu power_pmu = { .pmu_enable = power_pmu_enable, .pmu_disable = power_pmu_disable, .event_init = power_pmu_event_init, .add = power_pmu_add, .del = power_pmu_del, .start = power_pmu_start, .stop = power_pmu_stop, .read = power_pmu_read, .start_txn = power_pmu_start_txn, .cancel_txn = power_pmu_cancel_txn, .commit_txn = power_pmu_commit_txn, }; /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; int record = 0; if (event->hw.state & PERF_HES_STOPPED) { write_pmc(event->hw.idx, 0); return; } /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* * See if the total period for this event has expired, * and update for the next period. */ val = 0; left = local64_read(&event->hw.period_left) - delta; if (period) { if (left <= 0) { left += period; if (left <= 0) left = period; record = 1; event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; } write_pmc(event->hw.idx, val); local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); /* * Finally record data if requested. */ if (record) { struct perf_sample_data data; perf_sample_data_init(&data, ~0ULL); data.period = event->hw.last_period; if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); } } /* * Called from generic code to get the misc flags (i.e. processor mode) * for an event_id. */ unsigned long perf_misc_flags(struct pt_regs *regs) { u32 flags = perf_get_misc_flags(regs); if (flags) return flags; return user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; } /* * Called from generic code to get the instruction pointer * for an event_id. 
*/ unsigned long perf_instruction_pointer(struct pt_regs *regs) { unsigned long ip; if (TRAP(regs) != 0xf00) return regs->nip; /* not a PMU interrupt */ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); return ip; } static bool pmc_overflow(unsigned long val) { if ((int)val < 0) return true; /* * Events on POWER7 can roll back if a speculative event doesn't * eventually complete. Unfortunately in some rare cases they will * raise a performance monitor exception. We need to catch this to * ensure we reset the PMC. In all cases the PMC will be 256 or less * cycles from overflow. * * We only do this if the first pass fails to find any overflowing * PMCs because a user might set a period of less than 256 and we * don't want to mistakenly reset them. */ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) return true; return false; } /* * Performance monitor interrupt stuff */ static void perf_event_interrupt(struct pt_regs *regs) { int i; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event; unsigned long val; int found = 0; int nmi; if (cpuhw->n_limited) freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), mfspr(SPRN_PMC6)); perf_read_regs(regs); nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); else irq_enter(); for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; val = read_pmc(event->hw.idx); if ((int)val < 0) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs); } } /* * In case we didn't find and reset the event that caused * the interrupt, scan all events and reset any that are * negative, to avoid getting continual interrupts. * Any that we processed in the previous loop will not be negative. */ if (!found) { for (i = 0; i < ppmu->n_counter; ++i) { if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); if (pmc_overflow(val)) write_pmc(i + 1, 0); } } /* * Reset MMCR0 to its normal value. This will set PMXE and * clear FC (freeze counters) and PMAO (perf mon alert occurred) * and thus allow interrupts to occur again. * XXX might want to use MSR.PM to keep the events frozen until * we get back out of this interrupt. */ write_mmcr0(cpuhw, cpuhw->mmcr[0]); if (nmi) nmi_exit(); else irq_exit(); } static void power_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); if (!ppmu) return; memset(cpuhw, 0, sizeof(*cpuhw)); cpuhw->mmcr[0] = MMCR0_FC; } static int __cpuinit power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: power_pmu_setup(cpu); break; default: break; } return NOTIFY_OK; } int register_power_pmu(struct power_pmu *pmu) { if (ppmu) return -EBUSY; /* something's already registered */ ppmu = pmu; pr_info("%s performance monitor hardware support registered\n", pmu->name); #ifdef MSR_HV /* * Use FCHV to ignore kernel events if MSR.HV is set. */ if (mfmsr() & MSR_HV) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); perf_cpu_notifier(power_pmu_notifier); return 0; }
./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_13
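/*
 * [Editor's note: illustrative sketch, not part of either CrossVul record.
 *  The Wireshark dissector in the record below drives most of its decoding
 *  from `value_string' tables: arrays of { numeric value, label } pairs
 *  terminated by a { 0, NULL } sentinel (reg_datatypes[],
 *  devmode_papersize_vals[], printer_status_vals[], ...). Wireshark's own
 *  helpers perform the lookup; the self-contained analogue here only shows
 *  the convention - termination is on the NULL label, not on the value 0,
 *  so 0 can still be a legitimate entry. All names below are hypothetical.]
 */
#include <stdio.h>

struct sketch_value_string {
	unsigned    value;
	const char *label;
};

static const struct sketch_value_string sketch_reg_datatypes[] = {
	{ 0, "REG_NONE" },
	{ 1, "REG_SZ" },
	{ 3, "REG_BINARY" },
	{ 4, "REG_DWORD" },
	{ 0, NULL }		/* sentinel: NULL label ends the table */
};

/* Linear scan until the sentinel; fall back to a caller-supplied default. */
static const char *sketch_val_to_str(unsigned value,
				     const struct sketch_value_string *table,
				     const char *unknown)
{
	for (; table->label != NULL; table++)
		if (table->value == value)
			return table->label;
	return unknown;
}

int main(void)
{
	printf("%s\n", sketch_val_to_str(4, sketch_reg_datatypes, "Unknown"));	/* REG_DWORD */
	printf("%s\n", sketch_val_to_str(9, sketch_reg_datatypes, "Unknown"));	/* Unknown */
	return 0;
}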
crossvul-cpp_data_bad_5104_0
/* packet-dcerpc-spoolss.c * Routines for SMB \PIPE\spoolss packet disassembly * Copyright 2001-2003, Tim Potter <tpot@samba.org> * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* TODO list: - audit of item lengths */ #include "config.h" #include <epan/packet.h> #include <epan/expert.h> #include "packet-dcerpc.h" #include "packet-dcerpc-nt.h" #include "packet-dcerpc-spoolss.h" #include "packet-windows-common.h" void proto_register_dcerpc_spoolss(void); void proto_reg_handoff_dcerpc_spoolss(void); /* GetPrinterDriver2 */ static int hf_clientmajorversion = -1; static int hf_clientminorversion = -1; static int hf_servermajorversion = -1; static int hf_serverminorversion = -1; static int hf_driverpath = -1; static int hf_datafile = -1; static int hf_configfile = -1; static int hf_helpfile = -1; static int hf_monitorname = -1; static int hf_defaultdatatype = -1; static int hf_driverinfo_cversion = -1; static int hf_dependentfiles = -1; static int hf_previousdrivernames = -1; static int hf_driverdate = -1; static int hf_padding = -1; static int hf_driver_version_low = -1; static int hf_driver_version_high = -1; static int hf_mfgname = -1; static int hf_oemurl = -1; static int hf_hardwareid= -1; static int hf_provider = -1; /* GetPrinter */ /* Times */ static int hf_start_time = -1; static int hf_end_time = -1; static int hf_elapsed_time = -1; /****************************************************************************/ /* * New hf index values - I'm in the process of doing a bit of a cleanup -tpot */ static int hf_opnum = -1; static int hf_hnd = -1; static int hf_rc = -1; static int hf_offered = -1; static int hf_needed = -1; static int hf_returned = -1; static int hf_buffer_size = -1; static int hf_buffer_data = -1; static int hf_string_parm_size = -1; static int hf_string_parm_data= -1; static int hf_offset = -1; static int hf_level = -1; static int hf_access_required = -1; static int hf_printername = -1; static int hf_machinename = -1; static int hf_notifyname = -1; static int hf_printerdesc = -1; static int hf_printercomment = -1; static int hf_servername = -1; static int hf_sharename = -1; static int hf_portname = -1; static int hf_printerlocation = -1; static int hf_drivername = -1; static int hf_environment = -1; static int hf_username = -1; static int hf_documentname = -1; static int hf_outputfile = -1; static int hf_datatype = -1; static int hf_textstatus = -1; static int hf_sepfile = -1; static int hf_printprocessor = -1; static int hf_parameters = -1; /* Printer information */ static int hf_printer_cjobs = -1; static int hf_printer_total_jobs = -1; static int hf_printer_total_bytes = -1; static int hf_printer_global_counter = -1; static int hf_printer_total_pages = -1; static int hf_printer_major_version = -1; static int 
hf_printer_build_version = -1; static int hf_printer_unk7 = -1; static int hf_printer_unk8 = -1; static int hf_printer_unk9 = -1; static int hf_printer_session_ctr = -1; static int hf_printer_unk11 = -1; static int hf_printer_printer_errors = -1; static int hf_printer_unk13 = -1; static int hf_printer_unk14 = -1; static int hf_printer_unk15 = -1; static int hf_printer_unk16 = -1; static int hf_printer_changeid = -1; static int hf_printer_unk18 = -1; static int hf_printer_unk20 = -1; static int hf_printer_c_setprinter = -1; static int hf_printer_unk22 = -1; static int hf_printer_unk23 = -1; static int hf_printer_unk24 = -1; static int hf_printer_unk25 = -1; static int hf_printer_unk26 = -1; static int hf_printer_unk27 = -1; static int hf_printer_unk28 = -1; static int hf_printer_unk29 = -1; static int hf_printer_flags = -1; static int hf_printer_priority = -1; static int hf_printer_default_priority = -1; static int hf_printer_jobs = -1; static int hf_printer_averageppm = -1; static int hf_printer_guid = -1; static int hf_printer_action = -1; /* Printer data */ static int hf_printerdata = -1; static int hf_printerdata_key = -1; static int hf_printerdata_value = -1; static int hf_printerdata_type = -1; static int hf_printerdata_size = -1; /* Length of printer data */ static int hf_printerdata_data = -1; static int hf_printerdata_data_sz = -1; static int hf_printerdata_data_dword = -1; /* Devicemode */ static int hf_devmodectr_size = -1; static int hf_devmode = -1; static int hf_devmode_size = -1; static int hf_devmode_spec_version = -1; static int hf_devmode_driver_version = -1; static int hf_devmode_size2 = -1; static int hf_devmode_driver_extra_len = -1; static int hf_devmode_fields = -1; static int hf_devmode_orientation = -1; static int hf_devmode_paper_size = -1; static int hf_devmode_paper_width = -1; static int hf_devmode_paper_length = -1; static int hf_devmode_scale = -1; static int hf_devmode_copies = -1; static int hf_devmode_default_source = -1; static int hf_devmode_print_quality = -1; static int hf_devmode_color = -1; static int hf_devmode_duplex = -1; static int hf_devmode_y_resolution = -1; static int hf_devmode_tt_option = -1; static int hf_devmode_collate = -1; static int hf_devmode_log_pixels = -1; static int hf_devmode_bits_per_pel = -1; static int hf_devmode_pels_width = -1; static int hf_devmode_pels_height = -1; static int hf_devmode_display_flags = -1; static int hf_devmode_display_freq = -1; static int hf_devmode_icm_method = -1; static int hf_devmode_icm_intent = -1; static int hf_devmode_media_type = -1; static int hf_devmode_dither_type = -1; static int hf_devmode_reserved1 = -1; static int hf_devmode_reserved2 = -1; static int hf_devmode_panning_width = -1; static int hf_devmode_panning_height = -1; static int hf_devmode_driver_extra = -1; static int hf_devmode_fields_orientation = -1; static int hf_devmode_fields_papersize = -1; static int hf_devmode_fields_paperlength = -1; static int hf_devmode_fields_paperwidth = -1; static int hf_devmode_fields_scale = -1; static int hf_devmode_fields_position = -1; static int hf_devmode_fields_nup = -1; static int hf_devmode_fields_copies = -1; static int hf_devmode_fields_defaultsource = -1; static int hf_devmode_fields_printquality = -1; static int hf_devmode_fields_color = -1; static int hf_devmode_fields_duplex = -1; static int hf_devmode_fields_yresolution = -1; static int hf_devmode_fields_ttoption = -1; static int hf_devmode_fields_collate = -1; static int hf_devmode_fields_formname = -1; static int 
hf_devmode_fields_logpixels = -1; static int hf_devmode_fields_bitsperpel = -1; static int hf_devmode_fields_pelswidth = -1; static int hf_devmode_fields_pelsheight = -1; static int hf_devmode_fields_displayflags = -1; static int hf_devmode_fields_displayfrequency = -1; static int hf_devmode_fields_icmmethod = -1; static int hf_devmode_fields_icmintent = -1; static int hf_devmode_fields_mediatype = -1; static int hf_devmode_fields_dithertype = -1; static int hf_devmode_fields_panningwidth = -1; static int hf_devmode_fields_panningheight = -1; /* Print job */ static int hf_job_id = -1; static int hf_job_priority = -1; static int hf_job_position = -1; static int hf_job_totalpages = -1; static int hf_job_totalbytes = -1; static int hf_job_pagesprinted = -1; static int hf_job_bytesprinted = -1; static int hf_job_size = -1; static int hf_job_status = -1; static int hf_job_status_paused = -1; static int hf_job_status_error = -1; static int hf_job_status_deleting = -1; static int hf_job_status_spooling = -1; static int hf_job_status_printing = -1; static int hf_job_status_offline = -1; static int hf_job_status_paperout = -1; static int hf_job_status_printed = -1; static int hf_job_status_deleted = -1; static int hf_job_status_blocked = -1; static int hf_job_status_user_intervention = -1; /* Forms */ static int hf_form = -1; static int hf_form_level = -1; static int hf_form_name = -1; static int hf_form_flags = -1; static int hf_form_unknown = -1; static int hf_form_width = -1; static int hf_form_height = -1; static int hf_form_left_margin = -1; static int hf_form_top_margin = -1; static int hf_form_horiz_len = -1; static int hf_form_vert_len = -1; static int hf_enumforms_num = -1; /* Print notify */ static int hf_notify_options_version = -1; static int hf_notify_options_flags = -1; static int hf_notify_options_flags_refresh = -1; static int hf_notify_options_count = -1; static int hf_notify_option_type = -1; static int hf_notify_option_reserved1 = -1; static int hf_notify_option_reserved2 = -1; static int hf_notify_option_reserved3 = -1; static int hf_notify_option_count = -1; static int hf_notify_option_data_count = -1; static int hf_notify_info_count = -1; static int hf_notify_info_version = -1; static int hf_notify_info_flags = -1; static int hf_notify_info_data_type = -1; static int hf_notify_info_data_count = -1; static int hf_notify_info_data_id = -1; static int hf_notify_info_data_value1 = -1; static int hf_notify_info_data_value2 = -1; static int hf_notify_info_data_bufsize = -1; static int hf_notify_info_data_buffer = -1; static int hf_notify_info_data_buffer_len = -1; static int hf_notify_info_data_buffer_data = -1; static int hf_notify_field = -1; static int hf_printerlocal = -1; static int hf_rrpcn_changelow = -1; static int hf_rrpcn_changehigh = -1; static int hf_rrpcn_unk0 = -1; static int hf_rrpcn_unk1 = -1; static int hf_replyopenprinter_unk0 = -1; static int hf_replyopenprinter_unk1 = -1; static int hf_devmode_devicename = -1; static int hf_devmode_form_name = -1; static int hf_relative_string = -1; static int hf_value_name = -1; static int hf_keybuffer = -1; static int hf_value_string = -1; static expert_field ei_unimplemented_dissector = EI_INIT; static expert_field ei_unknown_data = EI_INIT; static expert_field ei_spool_printer_info_level = EI_INIT; static expert_field ei_printer_info_level = EI_INIT; static expert_field ei_form_level = EI_INIT; static expert_field ei_job_info_level = EI_INIT; static expert_field ei_driver_info_level = EI_INIT; static expert_field ei_level = 
EI_INIT; static expert_field ei_notify_info_data_type = EI_INIT; static expert_field ei_enumprinterdataex_value = EI_INIT; /* Registry data types */ #define DCERPC_REG_NONE 0 #define DCERPC_REG_SZ 1 #define DCERPC_REG_EXPAND_SZ 2 #define DCERPC_REG_BINARY 3 #define DCERPC_REG_DWORD 4 #define DCERPC_REG_DWORD_LE 4 /* DWORD, little endian */ #define DCERPC_REG_DWORD_BE 5 /* DWORD, big endian */ #define DCERPC_REG_LINK 6 #define DCERPC_REG_MULTI_SZ 7 #define DCERPC_REG_RESOURCE_LIST 8 #define DCERPC_REG_FULL_RESOURCE_DESCRIPTOR 9 #define DCERPC_REG_RESOURCE_REQUIREMENTS_LIST 10 static const value_string reg_datatypes[] = { { DCERPC_REG_NONE, "REG_NONE" }, { DCERPC_REG_SZ, "REG_SZ" }, { DCERPC_REG_EXPAND_SZ, "REG_EXPAND_SZ" }, { DCERPC_REG_BINARY, "REG_BINARY" }, { DCERPC_REG_DWORD, "REG_DWORD" }, /* { DCERPC_REG_DWORD_LE, "REG_DWORD_LE" }, */ { DCERPC_REG_DWORD_BE, "REG_DWORD_BE" }, { DCERPC_REG_LINK, "REG_LINK" }, { DCERPC_REG_MULTI_SZ, "REG_MULTI_SZ" }, { DCERPC_REG_RESOURCE_LIST, "REG_RESOURCE_LIST" }, { DCERPC_REG_FULL_RESOURCE_DESCRIPTOR, "REG_FULL_RESOURCE_DESCRIPTOR" }, { DCERPC_REG_RESOURCE_REQUIREMENTS_LIST, "REG_RESOURCE_REQUIREMENTS_LIST" }, {0, NULL } }; static value_string_ext reg_datatypes_ext = VALUE_STRING_EXT_INIT(reg_datatypes); /****************************************************************************/ /* * Dissect SPOOLSS specific access rights */ static int hf_server_access_admin = -1; static int hf_server_access_enum = -1; static int hf_printer_access_admin = -1; static int hf_printer_access_use = -1; static int hf_job_access_admin = -1; static void spoolss_printer_specific_rights(tvbuff_t *tvb, gint offset, proto_tree *tree, guint32 access) { proto_tree_add_boolean( tree, hf_printer_access_use, tvb, offset, 4, access); proto_tree_add_boolean( tree, hf_printer_access_admin, tvb, offset, 4, access); } struct access_mask_info spoolss_printer_access_mask_info = { "SPOOLSS printer", spoolss_printer_specific_rights, NULL, /* Generic mapping table */ NULL /* Standard mapping table */ }; static void spoolss_printserver_specific_rights(tvbuff_t *tvb, gint offset, proto_tree *tree, guint32 access) { proto_tree_add_boolean( tree, hf_server_access_enum, tvb, offset, 4, access); proto_tree_add_boolean( tree, hf_server_access_admin, tvb, offset, 4, access); } struct access_mask_info spoolss_printserver_access_mask_info = { "SPOOLSS print server", spoolss_printserver_specific_rights, NULL, /* Generic mapping table */ NULL /* Standard mapping table */ }; static void spoolss_job_specific_rights(tvbuff_t *tvb, gint offset, proto_tree *tree, guint32 access) { proto_tree_add_boolean( tree, hf_job_access_admin, tvb, offset, 4, access); } struct access_mask_info spoolss_job_access_mask_info = { "SPOOLSS job", spoolss_job_specific_rights, NULL, /* Generic mapping table */ NULL /* Standard mapping table */ }; /* * Routines to dissect a spoolss BUFFER */ typedef struct { tvbuff_t *tvb; proto_item *tree; /* Proto tree buffer located in */ proto_item *item; } BUFFER; static gint ett_BUFFER = -1; static int dissect_spoolss_buffer_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { BUFFER *b = (BUFFER *)di->private_data; proto_item *item; guint32 size; const guint8 *data; if (di->conformant_run) return offset; /* Dissect size and data */ offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_buffer_size, &size); offset = dissect_ndr_uint8s(tvb, offset, pinfo, NULL, di, drep, hf_buffer_data, size, &data); item = proto_tree_add_item( 
tree, hf_buffer_data, tvb, offset - size, size, ENC_NA); /* Return buffer info */ if (b) { /* I'm not sure about this. Putting the buffer into its own tvb makes sense and the dissection code is much clearer, but the data is a proper subset of the actual tvb. Not adding the new data source makes the hex display confusing as it switches between the 'DCERPC over SMB' tvb and the buffer tvb with no visual cues as to what is going on. */ b->tvb = tvb_new_child_real_data(tvb, data, size, size); add_new_data_source(pinfo, b->tvb, "SPOOLSS buffer"); b->item = item; b->tree = proto_item_add_subtree(item, ett_BUFFER); } return offset; } /* Dissect a spoolss buffer and return buffer data */ static int dissect_spoolss_buffer(tvbuff_t *tvb, gint offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, BUFFER *b) { if (b) memset(b, 0, sizeof(BUFFER)); di->private_data = b; offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_spoolss_buffer_data, NDR_POINTER_UNIQUE, "Buffer", -1); return offset; } static int dissect_spoolss_string_parm_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 buffer_len, len; gchar *s; proto_item *item = NULL; if (di->conformant_run) return offset; /* Dissect size and data */ offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_string_parm_size, &buffer_len); s = tvb_get_stringz_enc(wmem_packet_scope(), tvb, offset, &len, ENC_UTF_16|ENC_LITTLE_ENDIAN); if (tree && buffer_len) { tvb_ensure_bytes_exist(tvb, offset, buffer_len); item = proto_tree_add_string( tree, hf_string_parm_data, tvb, offset, len, s); } offset += buffer_len; col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", s); /* Append string to upper level item */ if (tree && item) { item = item->parent != NULL ? 
item->parent : item; proto_item_append_text(item, ": %s", s); } return offset; } /* Dissect a spoolss string parameter */ static int dissect_spoolss_string_parm(tvbuff_t *tvb, gint offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, const char *text) { offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_spoolss_string_parm_data, NDR_POINTER_UNIQUE, text, -1); return offset; } /* * SYSTEM_TIME */ static gint ett_SYSTEM_TIME = -1; static int hf_time_year = -1; static int hf_time_month = -1; static int hf_time_dow = -1; static int hf_time_day = -1; static int hf_time_hour = -1; static int hf_time_minute = -1; static int hf_time_second = -1; static int hf_time_msec = -1; static int dissect_SYSTEM_TIME(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, const char *name, gboolean add_subtree, char **data) { proto_item *item = NULL; proto_tree *subtree = tree; guint16 year, month, day, hour, minute, second, millisecond; char *str; if (add_subtree) { subtree = proto_tree_add_subtree(tree, tvb, offset, 16, ett_SYSTEM_TIME, &item, name); } offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_year, &year); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_month, &month); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_dow, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_day, &day); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_hour, &hour); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_minute, &minute); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_second, &second); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_time_msec, &millisecond); str = wmem_strdup_printf(wmem_packet_scope(), "%d/%02d/%02d %02d:%02d:%02d.%03d", year, month, day, hour, minute, second, millisecond); if (add_subtree) proto_item_append_text(item, ": %s", str); if (data) *data = str; return offset; } static int dissect_SYSTEM_TIME_ptr(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *str; offset = dissect_SYSTEM_TIME( tvb, offset, pinfo, tree, di, drep, NULL, FALSE, &str); dcv->private_data = str; return offset; } /* * SpoolssClosePrinter */ static int SpoolssClosePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { e_ctx_hnd policy_hnd; char *pol_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, TRUE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); return offset; } static int SpoolssClosePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* Dissect some printer data. The get/set/enum printerdata routines all store value/data in a uint8 array. We could use the ndr routines for this but that would result in one item for each byte in the printer data. 
*/ static gint ett_printerdata_data = -1; static gint ett_printerdata_value = -1; static int dissect_printerdata_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_, guint32 type) { proto_item *item, *hidden_item; proto_tree *subtree; guint32 size; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_printerdata_data, &item, "Data"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_printerdata_size, &size); if (size) { offset = dissect_ndr_uint8s( tvb, offset, pinfo, subtree, di, drep, hf_printerdata_data, size, NULL); switch(type) { case DCERPC_REG_SZ: { char *data = tvb_get_string_enc(NULL, tvb, offset - size, size, ENC_UTF_16|ENC_LITTLE_ENDIAN); proto_item_append_text(item, ": %s", data); col_append_fstr( pinfo->cinfo, COL_INFO, " = %s", data); hidden_item = proto_tree_add_string( tree, hf_printerdata_data_sz, tvb, offset - size, size, data); PROTO_ITEM_SET_HIDDEN(hidden_item); g_free(data); break; } case DCERPC_REG_DWORD: { guint32 data = tvb_get_letohl(tvb, offset - size); proto_item_append_text(item, ": 0x%08x", data); col_append_fstr( pinfo->cinfo, COL_INFO, " = 0x%08x", data); hidden_item = proto_tree_add_uint( tree, hf_printerdata_data_dword, tvb, offset - size, 4, data); PROTO_ITEM_SET_HIDDEN(hidden_item); break; } case DCERPC_REG_BINARY: col_append_str( pinfo->cinfo, COL_INFO, " = <binary data>"); break; default: break; } } proto_item_set_len(item, size + 4); return offset; } /* * SpoolssGetPrinterData */ static int SpoolssGetPrinterData_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *value_name; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); value_name=NULL; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_value, TRUE, value_name ? NULL : &value_name); /* GetPrinterData() stores the printerdata in se_data */ if(!pinfo->fd->flags.visited){ if(!dcv->se_data){ if(value_name){ dcv->se_data = wmem_strdup(wmem_file_scope(), value_name); } } } col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", value_name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssGetPrinterData_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 type; proto_item *hidden_item; const char *data; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, &type); data = (const char *)(dcv->se_data ? 
dcv->se_data : "????"); col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", data); offset = dissect_printerdata_data( tvb, offset, pinfo, tree, di, drep, type); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SpoolssGetPrinterDataEx */ static int SpoolssGetPrinterDataEx_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *key_name, *value_name; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); key_name=NULL; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_key, TRUE, &key_name); value_name=NULL; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_value, TRUE, &value_name); /* GetPrinterDataEx() stores the key/value in se_data */ if(!pinfo->fd->flags.visited){ if(!dcv->se_data){ dcv->se_data = wmem_strdup_printf(wmem_file_scope(), "%s==%s", key_name?key_name:"", value_name?value_name:""); } } if (dcv->se_data) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", (char *)dcv->se_data); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); return offset; } static int SpoolssGetPrinterDataEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 size, type; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, &type); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_returned, &size); if (dcv->se_data) { col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", (char *)dcv->se_data); } if (size) dissect_printerdata_data(tvb, offset, pinfo, tree, di, drep, type); offset += size; offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SpoolssSetPrinterData */ static int SpoolssSetPrinterData_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *value_name; guint32 type; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); value_name=NULL; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_value, TRUE, &value_name); /* GetPrinterDataEx() stores the key/value in se_data */ if(!pinfo->fd->flags.visited){ if(!dcv->se_data){ dcv->se_data = wmem_strdup_printf(wmem_file_scope(), "%s", value_name?value_name:""); } } if (dcv->se_data){ col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", (char *)dcv->se_data); } offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, &type); offset = dissect_printerdata_data( tvb, 
offset, pinfo, tree, di, drep, type); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssSetPrinterData_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SpoolssSetPrinterDataEx */ static int hf_setprinterdataex_max_len = -1; static int hf_setprinterdataex_real_len = -1; static int hf_setprinterdataex_data = -1; static int SpoolssSetPrinterDataEx_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { char *key_name, *value_name; guint32 max_len; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_key, TRUE, &key_name); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_value, TRUE, &value_name); col_append_fstr(pinfo->cinfo, COL_INFO, ", %s/%s", key_name, value_name); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_setprinterdataex_max_len, &max_len); offset = dissect_ndr_uint8s( tvb, offset, pinfo, tree, di, drep, hf_setprinterdataex_data, max_len, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_setprinterdataex_real_len, NULL); return offset; } static int SpoolssSetPrinterDataEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* XXX - "name" should be an hf_ value for an FT_STRING. */ static int dissect_spoolss_uint16uni(tvbuff_t *tvb, int offset, packet_info *pinfo _U_, proto_tree *tree, guint8 *drep _U_, char **data, int hf_name) { gint len, remaining; char *text; if (offset % 2) offset += 2 - (offset % 2); /* Get remaining data in buffer as a string */ remaining = tvb_captured_length_remaining(tvb, offset); if (remaining <= 0) { if (data) *data = g_strdup(""); return offset; } text = tvb_get_string_enc(NULL, tvb, offset, remaining, ENC_UTF_16|ENC_LITTLE_ENDIAN); len = (int)strlen(text); proto_tree_add_string(tree, hf_name, tvb, offset, len * 2, text); if (data) *data = text; else g_free(text); return offset + (len + 1) * 2; } /* * DEVMODE */ /* Devicemode orientation values */ static const value_string devmode_orientation_vals[] = { { DEVMODE_ORIENTATION_PORTRAIT, "Portrait" }, { DEVMODE_ORIENTATION_LANDSCAPE, "Landscape" }, { 0, NULL } }; /* Paper size values. International paper sizes is a fascinating topic. No seriously! 
(-: */
static const value_string devmode_papersize_vals[] = {
	{ DEVMODE_PAPERSIZE_LETTER, "Letter" },
	{ DEVMODE_PAPERSIZE_LETTERSMALL, "Letter (small)" },
	{ DEVMODE_PAPERSIZE_TABLOID, "Tabloid" },
	{ DEVMODE_PAPERSIZE_LEDGER, "Ledger" },
	{ DEVMODE_PAPERSIZE_LEGAL, "Legal" },
	{ DEVMODE_PAPERSIZE_STATEMENT, "Statement" },
	{ DEVMODE_PAPERSIZE_EXECUTIVE, "Executive" },
	{ DEVMODE_PAPERSIZE_A3, "A3" },
	{ DEVMODE_PAPERSIZE_A4, "A4" },
	{ DEVMODE_PAPERSIZE_A4SMALL, "A4 (small)" },
	{ DEVMODE_PAPERSIZE_A5, "A5" },
	{ DEVMODE_PAPERSIZE_B4, "B4" },
	{ DEVMODE_PAPERSIZE_B5, "B5" },
	{ DEVMODE_PAPERSIZE_FOLIO, "Folio" },
	{ DEVMODE_PAPERSIZE_QUARTO, "Quarto" },
	{ DEVMODE_PAPERSIZE_10X14, "10x14" },
	{ DEVMODE_PAPERSIZE_11X17, "11x17" },
	{ DEVMODE_PAPERSIZE_NOTE, "Note" },
	{ DEVMODE_PAPERSIZE_ENV9, "Envelope #9" },
	{ DEVMODE_PAPERSIZE_ENV10, "Envelope #10" },
	{ DEVMODE_PAPERSIZE_ENV11, "Envelope #11" },
	{ DEVMODE_PAPERSIZE_ENV12, "Envelope #12" },
	{ DEVMODE_PAPERSIZE_ENV14, "Envelope #14" },
	{ DEVMODE_PAPERSIZE_CSHEET, "C sheet" },
	{ DEVMODE_PAPERSIZE_DSHEET, "D sheet" },
	{ DEVMODE_PAPERSIZE_ESHEET, "E sheet" },
	{ DEVMODE_PAPERSIZE_ENVDL, "Envelope DL" },
	{ DEVMODE_PAPERSIZE_ENVC5, "Envelope C5" },
	{ DEVMODE_PAPERSIZE_ENVC3, "Envelope C3" },
	{ DEVMODE_PAPERSIZE_ENVC4, "Envelope C4" },
	{ DEVMODE_PAPERSIZE_ENVC6, "Envelope C6" },
	{ DEVMODE_PAPERSIZE_ENVC65, "Envelope C65" },
	{ DEVMODE_PAPERSIZE_ENVB4, "Envelope B4" },
	{ DEVMODE_PAPERSIZE_ENVB5, "Envelope B5" },
	{ DEVMODE_PAPERSIZE_ENVB6, "Envelope B6" },
	{ DEVMODE_PAPERSIZE_ENVITALY, "Envelope (Italy)" },
	{ DEVMODE_PAPERSIZE_ENVMONARCH, "Envelope (Monarch)" },
	{ DEVMODE_PAPERSIZE_ENVPERSONAL, "Envelope (Personal)" },
	{ DEVMODE_PAPERSIZE_FANFOLDUS, "Fanfold (US)" },
	{ DEVMODE_PAPERSIZE_FANFOLDSTDGERMAN, "Fanfold (Std German)" },
	{ DEVMODE_PAPERSIZE_FANFOLDLGLGERMAN, "Fanfold (Legal German)" },
	{ DEVMODE_PAPERSIZE_ISOB4, "B4 (ISO)" },
	{ DEVMODE_PAPERSIZE_JAPANESEPOSTCARD, "Japanese postcard" },
	{ DEVMODE_PAPERSIZE_9X11, "9x11" },
	{ DEVMODE_PAPERSIZE_10X11, "10x11" },
	{ DEVMODE_PAPERSIZE_15X11, "15x11" },
	{ DEVMODE_PAPERSIZE_ENVINVITE, "Envelope (Invite)" },
	{ DEVMODE_PAPERSIZE_RESERVED48, "Reserved (48)" },
	{ DEVMODE_PAPERSIZE_RESERVED49, "Reserved (49)" },
	{ DEVMODE_PAPERSIZE_LETTEREXTRA, "Letter (Extra)" },
	{ DEVMODE_PAPERSIZE_LEGALEXTRA, "Legal (Extra)" },
	{ DEVMODE_PAPERSIZE_TABLOIDEXTRA, "Tabloid (Extra)" },
	{ DEVMODE_PAPERSIZE_A4EXTRA, "A4 (Extra)" },
	{ DEVMODE_PAPERSIZE_LETTERTRANS, "Letter (Transverse)" },
	{ DEVMODE_PAPERSIZE_A4TRANS, "A4 (Transverse)" },
	{ DEVMODE_PAPERSIZE_LETTEREXTRATRANS, "Letter (Extra, Transverse)" },
	{ DEVMODE_PAPERSIZE_APLUS, "A+" },
	{ DEVMODE_PAPERSIZE_BPLUS, "B+" },
	{ DEVMODE_PAPERSIZE_LETTERPLUS, "Letter+" },
	{ DEVMODE_PAPERSIZE_A4PLUS, "A4+" },
	{ DEVMODE_PAPERSIZE_A5TRANS, "A5 (Transverse)" },
	{ DEVMODE_PAPERSIZE_B5TRANS, "B5 (Transverse)" },
	{ DEVMODE_PAPERSIZE_A3EXTRA, "A3 (Extra)" },
	{ DEVMODE_PAPERSIZE_A5EXTRA, "A5 (Extra)" },
	{ DEVMODE_PAPERSIZE_B5EXTRA, "B5 (Extra)" },
	{ DEVMODE_PAPERSIZE_A2, "A2" },
	{ DEVMODE_PAPERSIZE_A3TRANS, "A3 (Transverse)" },
	{ DEVMODE_PAPERSIZE_A3EXTRATRANS, "A3 (Extra, Transverse)" },
	{ DEVMODE_PAPERSIZE_DBLJAPANESEPOSTCARD, "Double Japanese Postcard" },
	{ DEVMODE_PAPERSIZE_A6, "A6" },
	{ DEVMODE_PAPERSIZE_JENVKAKU2, "Japanese Envelope (Kaku #2)" },
	{ DEVMODE_PAPERSIZE_JENVKAKU3, "Japanese Envelope (Kaku #3)" },
	{ DEVMODE_PAPERSIZE_JENVCHOU3, "Japanese Envelope (Chou #3)" },
	{ DEVMODE_PAPERSIZE_JENVCHOU4, "Japanese Envelope (Chou #4)" },
	{ DEVMODE_PAPERSIZE_LETTERROT, "Letter (Rotated)" },
	{
DEVMODE_PAPERSIZE_A3ROT, "A3 (Rotated)" }, { DEVMODE_PAPERSIZE_A4ROT, "A4 (Rotated)" }, { DEVMODE_PAPERSIZE_A5ROT, "A5 (Rotated)" }, { DEVMODE_PAPERSIZE_B4JISROT, "B4 (JIS, Rotated)" }, { DEVMODE_PAPERSIZE_B5JISROT, "B5 (JIS, Rotated)"}, { DEVMODE_PAPERSIZE_JAPANESEPOSTCARDROT, "Japanese Postcard (Rotated)" }, { DEVMODE_PAPERSIZE_DBLJAPANESEPOSTCARDROT82, "Double Japanese Postcard (Rotated)" }, { DEVMODE_PAPERSIZE_A6ROT, "A6 (Rotated)" }, { DEVMODE_PAPERSIZE_JENVKAKU2ROT, "Japanese Envelope (Kaku #2, Rotated)" }, { DEVMODE_PAPERSIZE_JENVKAKU3ROT, "Japanese Envelope (Kaku #3, Rotated)" }, { DEVMODE_PAPERSIZE_JENVCHOU3ROT, "Japanese Envelope (Chou #3, Rotated)" }, { DEVMODE_PAPERSIZE_JENVCHOU4ROT, "Japanese Envelope (Chou #4, Rotated)" }, { DEVMODE_PAPERSIZE_B6JIS, "B6 (JIS)" }, { DEVMODE_PAPERSIZE_B6JISROT, "B6 (JIS, Rotated)" }, { DEVMODE_PAPERSIZE_12X11, "12x11" }, { DEVMODE_PAPERSIZE_JENVYOU4, "Japanese Envelope (You #4)" }, { DEVMODE_PAPERSIZE_JENVYOU4ROT, "Japanese Envelope (You #4, Rotated" }, { DEVMODE_PAPERSIZE_P16K, "PRC 16K" }, { DEVMODE_PAPERSIZE_P32K, "PRC 32K" }, { DEVMODE_PAPERSIZE_P32KBIG, "P32K (Big)" }, { DEVMODE_PAPERSIZE_PENV1, "PRC Envelope #1" }, { DEVMODE_PAPERSIZE_PENV2, "PRC Envelope #2" }, { DEVMODE_PAPERSIZE_PENV3, "PRC Envelope #3" }, { DEVMODE_PAPERSIZE_PENV4, "PRC Envelope #4" }, { DEVMODE_PAPERSIZE_PENV5, "PRC Envelope #5" }, { DEVMODE_PAPERSIZE_PENV6, "PRC Envelope #6" }, { DEVMODE_PAPERSIZE_PENV7, "PRC Envelope #7" }, { DEVMODE_PAPERSIZE_PENV8, "PRC Envelope #8" }, { DEVMODE_PAPERSIZE_PENV9, "PRC Envelope #9" }, { DEVMODE_PAPERSIZE_PENV10, "PRC Envelope #10" }, { DEVMODE_PAPERSIZE_P16KROT, "PRC 16K (Rotated)" }, { DEVMODE_PAPERSIZE_P32KROT, "PRC 32K (Rotated)" }, { DEVMODE_PAPERSIZE_P32KBIGROT, "PRC 32K (Big, Rotated)" }, { DEVMODE_PAPERSIZE_PENV1ROT, "PRC Envelope #1 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV2ROT, "PRC Envelope #2 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV3ROT, "PRC Envelope #3 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV4ROT, "PRC Envelope #4 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV5ROT, "PRC Envelope #5 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV6ROT, "PRC Envelope #6 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV7ROT, "PRC Envelope #7 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV8ROT, "PRC Envelope #8 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV9ROT, "PRC Envelope #9 (Rotated)" }, { DEVMODE_PAPERSIZE_PENV10ROT, "PRC Envelope #10 (Rotated)" }, { 0, NULL } }; static value_string_ext devmode_papersize_vals_ext = VALUE_STRING_EXT_INIT(devmode_papersize_vals); /* List of observed specversions */ static const value_string devmode_specversion_vals[] = { { 0x0320, "Observed" }, { 0x0400, "Observed" }, { 0x0401, "Observed" }, { 0x040d, "Observed" }, { 0, NULL } }; /* Paper sources */ static const value_string devmode_papersource_vals[] = { { DEVMODE_PAPERSOURCE_UPPER, "Upper" }, { DEVMODE_PAPERSOURCE_LOWER, "Lower" }, { DEVMODE_PAPERSOURCE_MIDDLE, "Middle" }, { DEVMODE_PAPERSOURCE_MANUAL, "Manual" }, { DEVMODE_PAPERSOURCE_ENV, "Envelope" }, { DEVMODE_PAPERSOURCE_ENVMANUAL, "Envelope Manual" }, { DEVMODE_PAPERSOURCE_AUTO, "Auto" }, { DEVMODE_PAPERSOURCE_TRACTOR, "Tractor" }, { DEVMODE_PAPERSOURCE_SMALLFMT, "Small Format" }, { DEVMODE_PAPERSOURCE_LARGEFMAT, "Large Format" }, { DEVMODE_PAPERSOURCE_LARGECAP, "Large Capacity" }, { DEVMODE_PAPERSOURCE_CASSETTE, "Cassette" }, { DEVMODE_PAPERSOURCE_FORMSRC, "Form Source" }, { 0, NULL } }; static value_string_ext devmode_papersource_vals_ext = VALUE_STRING_EXT_INIT(devmode_papersource_vals); /* Print quality */ static const value_string 
devmode_printquality_vals[] = { { DEVMODE_PRINTQUALITY_HIGH, "High" }, { DEVMODE_PRINTQUALITY_MEDIUM, "Medium" }, { DEVMODE_PRINTQUALITY_LOW, "Low" }, { DEVMODE_PRINTQUALITY_DRAFT, "Draft" }, { 0, NULL } }; /* Color */ static const value_string devmode_colour_vals[] = { { DEVMODE_COLOUR_COLOUR, "Colour" }, { DEVMODE_COLOUR_MONO, "Monochrome" }, { 0, NULL } }; /* TrueType options */ static const value_string devmode_ttoption_vals[] = { { 0, "Not set" }, { DEVMODE_TTOPTION_BITMAP, "Bitmap" }, { DEVMODE_TTOPTION_DOWNLOAD, "Download" }, { DEVMODE_TTOPTION_DOWNLOAD_OUTLINE, "Download outline" }, { DEVMODE_TTOPTION_SUBDEV, "Substitute device fonts" }, { 0, NULL } }; /* Collate info */ static const value_string devmode_collate_vals[] = { { DEVMODE_COLLATE_FALSE, "False" }, { DEVMODE_COLLATE_TRUE, "True" }, { 0, NULL } }; /* Duplex info */ static const value_string devmode_duplex_vals[] = { { DEVMODE_DUPLEX_SIMPLEX, "Simplex" }, { DEVMODE_DUPLEX_VERT, "Vertical" }, { DEVMODE_DUPLEX_HORIZ, "Horizontal" }, { 0, NULL } }; static const value_string devmode_displayflags_vals[] = { { 0, "Colour" }, { DEVMODE_DISPLAYFLAGS_GRAYSCALE, "Grayscale" }, { DEVMODE_DISPLAYFLAGS_INTERLACED, "Interlaced" }, { 0, NULL } }; static const value_string devmode_icmmethod_vals[] = { { DEVMODE_ICMMETHOD_NONE, "None" }, { DEVMODE_ICMMETHOD_SYSTEM, "System" }, { DEVMODE_ICMMETHOD_DRIVER, "Driver" }, { DEVMODE_ICMMETHOD_DEVICE, "Device" }, { 0, NULL } }; static const value_string devmode_icmintent_vals[] = { { 0, "Not set" }, { DEVMODE_ICMINTENT_SATURATE, "Saturate" }, { DEVMODE_ICMINTENT_CONTRAST, "Contrast" }, { DEVMODE_ICMINTENT_COLORIMETRIC, "Colorimetric" }, { DEVMODE_ICMINTENT_ABS_COLORIMETRIC, "Absolute colorimetric" }, { 0, NULL } }; static const value_string devmode_mediatype_vals[] = { { 0, "Not set" }, { DEVMODE_MEDIATYPE_STANDARD, "Standard" }, { DEVMODE_MEDIATYPE_TRANSPARENCY, "Transparency" }, { DEVMODE_MEDIATYPE_GLOSSY, "Glossy" }, { 0, NULL } }; static const value_string devmode_dithertype_vals[] = { { 0, "Not set" }, { DEVMODE_DITHERTYPE_NONE, "None" }, { DEVMODE_DITHERTYPE_COARSE, "Coarse" }, { DEVMODE_DITHERTYPE_LINE, "Line" }, { DEVMODE_DITHERTYPE_LINEART, "Line art" }, { DEVMODE_DITHERTYPE_ERRORDIFFUSION, "Error diffusion" }, { DEVMODE_DITHERTYPE_RESERVED6, "Reserved 6" }, { DEVMODE_DITHERTYPE_RESERVED7, "Reserved 7" }, { DEVMODE_DITHERTYPE_GRAYSCALE, "Grayscale" }, { 0, NULL } }; static gint ett_DEVMODE_fields = -1; static int dissect_DEVMODE_fields(tvbuff_t *tvb, gint offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_, guint32 *pdata) { guint32 fields; proto_item *hidden_item; static const int * hf_fields[] = { &hf_devmode_fields_orientation, &hf_devmode_fields_papersize, &hf_devmode_fields_paperlength, &hf_devmode_fields_paperwidth, &hf_devmode_fields_scale, &hf_devmode_fields_position, &hf_devmode_fields_nup, &hf_devmode_fields_copies, &hf_devmode_fields_defaultsource, &hf_devmode_fields_printquality, &hf_devmode_fields_color, &hf_devmode_fields_duplex, &hf_devmode_fields_yresolution, &hf_devmode_fields_ttoption, &hf_devmode_fields_collate, &hf_devmode_fields_formname, &hf_devmode_fields_logpixels, &hf_devmode_fields_bitsperpel, &hf_devmode_fields_pelswidth, &hf_devmode_fields_pelsheight, &hf_devmode_fields_displayflags, &hf_devmode_fields_displayfrequency, &hf_devmode_fields_icmmethod, &hf_devmode_fields_icmintent, &hf_devmode_fields_mediatype, &hf_devmode_fields_dithertype, &hf_devmode_fields_panningwidth, &hf_devmode_fields_panningheight, NULL }; hidden_item = 
proto_tree_add_uint( tree, hf_devmode, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep, -1, &fields); proto_tree_add_bitmask_value_with_flags(tree, tvb, offset - 4, hf_devmode_fields, ett_DEVMODE_fields, hf_fields, fields, BMT_NO_APPEND); if (pdata) *pdata = fields; return offset; } static gint ett_DEVMODE = -1; static int dissect_DEVMODE(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_item *item; proto_tree *subtree; guint16 driver_extra; gint16 print_quality; guint32 fields; int struct_start = offset; if (di->conformant_run) return offset; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_DEVMODE, &item, "Devicemode"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_size, NULL); /* The device name is stored in a 32-wchar buffer */ dissect_spoolss_uint16uni(tvb, offset, pinfo, subtree, drep, NULL, hf_devmode_devicename); offset += 64; offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_spec_version, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_driver_version, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_size2, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_driver_extra_len, &driver_extra); offset = dissect_DEVMODE_fields( tvb, offset, pinfo, subtree, di, drep, &fields); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_orientation, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_paper_size, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_paper_length, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_paper_width, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_scale, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_copies, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_default_source, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, NULL, di, drep, hf_devmode_print_quality, &print_quality); if (print_quality < 0) proto_tree_add_item( subtree, hf_devmode_print_quality, tvb, offset - 2, 2, DREP_ENC_INTEGER(drep)); else proto_tree_add_uint_format_value( subtree, hf_devmode_print_quality, tvb, offset - 4, 4, print_quality, "%d dpi", print_quality); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_color, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_duplex, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_y_resolution, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_tt_option, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_collate, NULL); dissect_spoolss_uint16uni(tvb, offset, pinfo, subtree, drep, NULL, hf_devmode_form_name); offset += 64; offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_devmode_log_pixels, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_bits_per_pel, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_pels_width, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_pels_height, NULL); offset = 
dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_display_flags, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_display_freq, NULL); /* TODO: Some of the remaining fields are optional. See rpc_parse/parse_spoolss.c in the Samba source for details. */ offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_icm_method, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_icm_intent, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_media_type, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_dither_type, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_reserved1, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_reserved2, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_panning_width, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_devmode_panning_height, NULL); if (driver_extra) offset = dissect_ndr_uint8s( tvb, offset, pinfo, subtree, di, drep, hf_devmode_driver_extra, driver_extra, NULL); proto_item_set_len(item, offset - struct_start); return offset; } /* * DEVMODE_CTR */ static gint ett_DEVMODE_CTR = -1; static int dissect_DEVMODE_CTR(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; guint32 size; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DEVMODE_CTR, NULL, "Devicemode container"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_devmodectr_size, &size); offset = dissect_ndr_pointer( tvb, offset, pinfo, subtree, di, drep, dissect_DEVMODE, NDR_POINTER_UNIQUE, "Devicemode", -1); return offset; } /* * Relative string given by offset into the current buffer. Note that * the offset for subsequent relstrs are against the structure start, not * the point where the offset is parsed from. */ static gint ett_RELSTR = -1; static int dissect_spoolss_relstr(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, int hf_index, int struct_start, char **data) { proto_item *item; proto_tree *subtree; guint32 relstr_offset, relstr_start, relstr_end; char *text; /* Peek ahead to read the string. We need this for the proto_tree_add_string() call so filtering will work. */ offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_offset, &relstr_offset); relstr_start = relstr_offset + struct_start; if (relstr_offset) { relstr_end = dissect_spoolss_uint16uni( tvb, relstr_start, pinfo, NULL, drep, &text, hf_relative_string); } else { /* relstr_offset == 0 is a NULL string */ text = g_strdup(""); relstr_end = relstr_start; } /* OK now add the proto item with the string value */ item = proto_tree_add_string(tree, hf_index, tvb, relstr_start, relstr_end - relstr_start, text); subtree = proto_item_add_subtree(item, ett_RELSTR); dissect_ndr_uint32( tvb, offset - 4, pinfo, subtree, di, drep, hf_offset, NULL); if (relstr_offset) dissect_spoolss_uint16uni( tvb, relstr_start, pinfo, subtree, drep, NULL, hf_relative_string); if (data) *data = text; else g_free(text); return offset; } /* An array of relative strings. This is currently just a copy of the dissect_spoolss_relstr() function as I can't find an example driver that has more than one dependent file. 
*/ static gint ett_RELSTR_ARRAY = -1; static int dissect_spoolss_relstrarray(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, int hf_index, int struct_start, char **data) { proto_item *item; proto_tree *subtree; guint32 relstr_offset, relstr_start/*, relstr_end, relstr_len*/; char *text; item = proto_tree_add_string(tree, hf_index, tvb, offset, 4, ""); subtree = proto_item_add_subtree(item, ett_RELSTR_ARRAY); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_offset, &relstr_offset); /* A relative offset of zero is a NULL string */ relstr_start = relstr_offset + struct_start; if (relstr_offset) /*relstr_end = */dissect_spoolss_uint16uni( tvb, relstr_start, pinfo, subtree, drep, &text, hf_relative_string); else { text = g_strdup("NULL"); /*relstr_end = offset;*/ } /*relstr_len = relstr_end - relstr_start;*/ proto_item_append_text(item, "%s", text); if (data) *data = text; else g_free(text); return offset; } /* * PRINTER_INFO_0 */ static int hf_printer_status = -1; static const value_string printer_status_vals[] = { { PRINTER_STATUS_OK, "OK" }, { PRINTER_STATUS_PAUSED, "Paused" }, { PRINTER_STATUS_ERROR, "Error" }, { PRINTER_STATUS_PENDING_DELETION, "Pending deletion" }, { PRINTER_STATUS_PAPER_JAM, "Paper jam" }, { PRINTER_STATUS_PAPER_OUT, "Paper out" }, { PRINTER_STATUS_MANUAL_FEED, "Manual feed" }, { PRINTER_STATUS_PAPER_PROBLEM, "Paper problem" }, { PRINTER_STATUS_OFFLINE, "Offline" }, { PRINTER_STATUS_IO_ACTIVE, "IO active" }, { PRINTER_STATUS_BUSY, "Busy" }, { PRINTER_STATUS_PRINTING, "Printing" }, { PRINTER_STATUS_OUTPUT_BIN_FULL, "Output bin full" }, { PRINTER_STATUS_NOT_AVAILABLE, "Not available" }, { PRINTER_STATUS_WAITING, "Waiting" }, { PRINTER_STATUS_PROCESSING, "Processing" }, { PRINTER_STATUS_INITIALIZING, "Initialising" }, { PRINTER_STATUS_WARMING_UP, "Warming up" }, { PRINTER_STATUS_TONER_LOW, "Toner low" }, { PRINTER_STATUS_NO_TONER, "No toner" }, { PRINTER_STATUS_PAGE_PUNT, "Page punt" }, { PRINTER_STATUS_USER_INTERVENTION, "User intervention" }, { PRINTER_STATUS_OUT_OF_MEMORY, "Out of memory" }, { PRINTER_STATUS_DOOR_OPEN, "Door open" }, { PRINTER_STATUS_SERVER_UNKNOWN, "Server unknown" }, { PRINTER_STATUS_POWER_SAVE, "Power save" }, { 0, NULL } }; static value_string_ext printer_status_vals_ext = VALUE_STRING_EXT_INIT(printer_status_vals); static gint ett_PRINTER_INFO_0 = -1; static int dissect_PRINTER_INFO_0(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printername, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_servername, 0, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_cjobs, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_total_jobs, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_total_bytes, NULL); offset = dissect_SYSTEM_TIME( tvb, offset, pinfo, tree, di, drep, "Unknown time", TRUE, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_global_counter, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_total_pages, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_major_version, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_build_version, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, 
drep, hf_printer_unk7, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk8, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk9, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_session_ctr, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk11, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_printer_errors, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk13, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk14, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk15, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk16, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_changeid, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk18, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_status, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_unk20, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_c_setprinter, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk22, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk23, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk24, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk25, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk26, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk27, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk28, NULL); offset = dissect_ndr_uint16( tvb, offset, pinfo, tree, di, drep, hf_printer_unk29, NULL); return offset; } /* * PRINTER_INFO_1 */ static gint ett_PRINTER_INFO_1 = -1; static int dissect_PRINTER_INFO_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_flags, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printerdesc, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printername, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printercomment, 0, NULL); return offset; } /* Job status */ static const true_false_string tfs_job_status_paused = { "Job is paused", "Job is not paused" }; static const true_false_string tfs_job_status_error = { "Job has an error", "Job is OK" }; static const true_false_string tfs_job_status_deleting = { "Job is being deleted", "Job is not being deleted" }; static const true_false_string tfs_job_status_spooling = { "Job is being spooled", "Job is not being spooled" }; static const true_false_string tfs_job_status_printing = { "Job is being printed", "Job is not being printed" }; static const true_false_string tfs_job_status_offline = { "Job is offline", "Job is not offline" }; static const true_false_string tfs_job_status_paperout = { "Job is out of paper", "Job is not out of paper" }; static const true_false_string tfs_job_status_printed = { "Job has completed printing", "Job has not completed printing" }; static const true_false_string 
tfs_job_status_deleted = { "Job has been deleted", "Job has not been deleted" }; static const true_false_string tfs_job_status_blocked = { "Job has been blocked", "Job has not been blocked" }; static const true_false_string tfs_job_status_user_intervention = { "User intervention required", "User intervention not required" }; static gint ett_job_status = -1; static int dissect_job_status(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 status; static const int * hf_status[] = { &hf_job_status_user_intervention, &hf_job_status_blocked, &hf_job_status_deleted, &hf_job_status_printed, &hf_job_status_paperout, &hf_job_status_offline, &hf_job_status_printing, &hf_job_status_spooling, &hf_job_status_deleting, &hf_job_status_error, &hf_job_status_paused, NULL }; offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep, -1, &status); proto_tree_add_bitmask_value_with_flags(tree, tvb, offset - 4, hf_job_status, ett_job_status, hf_status, status, BMT_NO_APPEND); return offset; } /* Printer attributes */ static gint ett_printer_attributes = -1; static int hf_printer_attributes = -1; static int hf_printer_attributes_queued = -1; static int hf_printer_attributes_direct = -1; static int hf_printer_attributes_default = -1; static int hf_printer_attributes_shared = -1; static int hf_printer_attributes_network = -1; static int hf_printer_attributes_hidden = -1; static int hf_printer_attributes_local = -1; static int hf_printer_attributes_enable_devq = -1; static int hf_printer_attributes_keep_printed_jobs = -1; static int hf_printer_attributes_do_complete_first = -1; static int hf_printer_attributes_work_offline = -1; static int hf_printer_attributes_enable_bidi = -1; static int hf_printer_attributes_raw_only = -1; static int hf_printer_attributes_published = -1; static const true_false_string tfs_printer_attributes_queued = { "Printer starts printing after last page spooled", "Printer starts printing while spooling" }; static const true_false_string tfs_printer_attributes_direct = { "Jobs sent directly to printer", "Jobs are spooled to printer before printing" }; static const true_false_string tfs_printer_attributes_default = { "Printer is the default printer", "Printer is not the default printer" }; static const true_false_string tfs_printer_attributes_shared = { "Printer is shared", "Printer is not shared" }; static const true_false_string tfs_printer_attributes_network = { "Printer is a network printer connection", "Printer is not a network printer connection" }; static const true_false_string tfs_printer_attributes_hidden = { "Reserved", "Reserved" }; static const true_false_string tfs_printer_attributes_local = { "Printer is a local printer", "Printer is not a local printer" }; static const true_false_string tfs_printer_attributes_enable_devq = { "Call DevQueryPrint", "Do not call DevQueryPrint" }; static const true_false_string tfs_printer_attributes_keep_printed_jobs = { "Jobs are kept after they are printed", "Jobs are deleted after printing" }; static const true_false_string tfs_printer_attributes_do_complete_first = { "Jobs that have completed spooling are scheduled before still spooling jobs", "Jobs are scheduled in the order they start spooling" }; static const true_false_string tfs_printer_attributes_work_offline = { "The printer is currently connected", "The printer is currently not connected" }; static const true_false_string tfs_printer_attributes_enable_bidi = { "Bidirectional communications are supported", "Bidirectional 
communications are not supported" }; static const true_false_string tfs_printer_attributes_raw_only = { "Only raw data type print jobs can be spooled", "All data type print jobs can be spooled" }; static const true_false_string tfs_printer_attributes_published = { "Printer is published in the directory", "Printer is not published in the directory" }; static int dissect_printer_attributes(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 attributes; static const int * hf_attributes[] = { &hf_printer_attributes_published, &hf_printer_attributes_raw_only, &hf_printer_attributes_enable_bidi, &hf_printer_attributes_work_offline, &hf_printer_attributes_do_complete_first, &hf_printer_attributes_keep_printed_jobs, &hf_printer_attributes_enable_devq, &hf_printer_attributes_local, &hf_printer_attributes_hidden, &hf_printer_attributes_network, &hf_printer_attributes_shared, &hf_printer_attributes_default, &hf_printer_attributes_direct, &hf_printer_attributes_queued, NULL }; offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep, -1, &attributes); proto_tree_add_bitmask_value_with_flags(tree, tvb, offset - 4, hf_printer_attributes, ett_printer_attributes, hf_attributes, attributes, BMT_NO_APPEND); return offset; } /* * PRINTER_INFO_2 */ static gint ett_PRINTER_INFO_2 = -1; static int dissect_PRINTER_INFO_2(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 devmode_offset, secdesc_offset; offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_servername, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printername, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_sharename, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_portname, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_drivername, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printercomment, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printerlocation, 0, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_offset, &devmode_offset); dissect_DEVMODE(tvb, devmode_offset - 4, pinfo, tree, di, drep); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_sepfile, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printprocessor, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_datatype, 0, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_parameters, 0, NULL); /* * XXX - what *is* the length of this security descriptor? * "prs_PRINTER_INFO_2()" is passed to "defer_ptr()", but * "defer_ptr" takes, as an argument, a function with a * different calling sequence from "prs_PRINTER_INFO_2()", * lacking the "len" argument, so that won't work. 
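 * For what it's worth, the code below just picks up the 32-bit relative
 * offset and hands the descriptor at that offset to dissect_nt_sec_desc()
 * with a length of -1, presumably leaving that routine to work out the
 * size for itself.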
*/ offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_offset, &secdesc_offset); dissect_nt_sec_desc( tvb, secdesc_offset, pinfo, tree, drep, FALSE, -1, &spoolss_printer_access_mask_info); offset = dissect_printer_attributes(tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_printer_priority, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_printer_default_priority, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_start_time, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_end_time, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_status, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_printer_jobs, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_printer_averageppm, NULL); return offset; } /* * PRINTER_INFO_3 */ static gint ett_PRINTER_INFO_3 = -1; static int dissect_PRINTER_INFO_3(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_flags, NULL); offset = dissect_nt_sec_desc( tvb, offset, pinfo, tree, drep, FALSE, -1, &spoolss_printer_access_mask_info); return offset; } /* * PRINTER_INFO_7 */ static gint ett_PRINTER_INFO_7 = -1; static const value_string getprinter_action_vals[] = { { DS_PUBLISH, "Publish" }, { DS_UNPUBLISH, "Unpublish" }, { DS_UPDATE, "Update" }, /* Not sure what the constant values are here */ /* { DS_PENDING, "Pending" }, */ /* { DS_REPUBLISH, "Republish" }, */ { 0, NULL } }; static int dissect_PRINTER_INFO_7(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_spoolss_relstr( tvb, offset, pinfo, tree, di, drep, hf_printer_guid, 0, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_action, NULL); return offset; } /* * PRINTER_DATATYPE structure */ static gint ett_PRINTER_DATATYPE = -1; static int dissect_PRINTER_DATATYPE(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { if (di->conformant_run) return offset; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_datatype, TRUE, NULL); return offset; } /* * USER_LEVEL_1 structure */ static gint ett_USER_LEVEL_1 = -1; static int hf_userlevel_size = -1; static int hf_userlevel_client = -1; static int hf_userlevel_user = -1; static int hf_userlevel_build = -1; static int hf_userlevel_major = -1; static int hf_userlevel_minor = -1; static int hf_userlevel_processor = -1; static int dissect_USER_LEVEL_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 level; /* Guy has pointed out that this dissection looks wrong. In the wireshark output for a USER_LEVEL_1 it looks like the info level and container pointer are transposed. I'm not even sure this structure is a container. 
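	   (For what it's worth, MS-RPRN describes the on-wire structure --
	   SPLCLIENT_INFO_1 -- as starting with a dwSize field and having no
	   info level member of its own, which would fit the transposition
	   theory; this has not been verified against captures.)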
*/ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_userlevel_size, NULL); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Client", hf_userlevel_client, 0); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "User", hf_userlevel_user, 0); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_userlevel_build, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_userlevel_major, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_userlevel_minor, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_userlevel_processor, NULL); return offset; } /* * USER_LEVEL_CTR structure */ static gint ett_USER_LEVEL_CTR = -1; static int dissect_USER_LEVEL_CTR(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; proto_item *item; guint32 level; if (di->conformant_run) return offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_USER_LEVEL_CTR, &item, "User level container"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_level, &level); switch(level) { case 1: offset = dissect_ndr_pointer( tvb, offset, pinfo, subtree, di, drep, dissect_USER_LEVEL_1, NDR_POINTER_UNIQUE, "User level 1", -1); break; default: expert_add_info_format(pinfo, item, &ei_level, "Info level %d not decoded", level); break; } return offset; } /* * SpoolssOpenPrinterEx */ static int SpoolssOpenPrinterEx_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *name; /* Parse packet */ dcv->private_data=NULL; offset = dissect_ndr_pointer_cb( tvb, offset, pinfo, tree, di, drep, dissect_ndr_wchar_cvstring, NDR_POINTER_UNIQUE, "Printer name", hf_printername, cb_wstr_postprocess, GINT_TO_POINTER(CB_STR_COL_INFO | CB_STR_SAVE | 1)); name = (char *)dcv->private_data; /* OpenPrinterEx() stores the key/value in se_data */ if(!pinfo->fd->flags.visited){ if(!dcv->se_data){ dcv->se_data = wmem_strdup_printf(wmem_file_scope(), "%s", name?name:""); } } offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_PRINTER_DATATYPE, NDR_POINTER_UNIQUE, "Printer datatype", -1); offset = dissect_DEVMODE_CTR(tvb, offset, pinfo, tree, di, drep); name=(char *)dcv->se_data; if (name) { if (name[0] == '\\' && name[1] == '\\') name += 2; /* Determine if we are opening a printer or a print server */ if (strchr(name, '\\')) offset = dissect_nt_access_mask( tvb, offset, pinfo, tree, di, drep, hf_access_required, &spoolss_printer_access_mask_info, NULL); else offset = dissect_nt_access_mask( tvb, offset, pinfo, tree, di, drep, hf_access_required, &spoolss_printserver_access_mask_info, NULL); } else { /* We can't decide what type of object being opened */ offset = dissect_nt_access_mask( tvb, offset, pinfo, tree, di, drep, hf_access_required, NULL, NULL); } offset = dissect_USER_LEVEL_CTR(tvb, offset, pinfo, tree, di, drep); return offset; } static int SpoolssOpenPrinterEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; e_ctx_hnd policy_hnd; proto_item *hnd_item; guint32 status; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, 
di, drep, hf_hnd, &policy_hnd, &hnd_item, TRUE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, &status); if( status == 0 ){ const char *pol_name; if (dcv->se_data){ pol_name = wmem_strdup_printf(wmem_packet_scope(), "OpenPrinterEx(%s)", (char *)dcv->se_data); } else { pol_name = "Unknown OpenPrinterEx() handle"; } if(!pinfo->fd->flags.visited){ dcerpc_store_polhnd_name(&policy_hnd, pinfo, pol_name); } if(hnd_item) proto_item_append_text(hnd_item, ": %s", pol_name); } return offset; } static const value_string printer_notify_option_data_vals[] = { { PRINTER_NOTIFY_SERVER_NAME, "Server name" }, { PRINTER_NOTIFY_PRINTER_NAME, "Printer name" }, { PRINTER_NOTIFY_SHARE_NAME, "Share name" }, { PRINTER_NOTIFY_PORT_NAME, "Port name" }, { PRINTER_NOTIFY_DRIVER_NAME, "Driver name" }, { PRINTER_NOTIFY_COMMENT, "Comment" }, { PRINTER_NOTIFY_LOCATION, "Location" }, { PRINTER_NOTIFY_DEVMODE, "Devmode" }, { PRINTER_NOTIFY_SEPFILE, "Sepfile" }, { PRINTER_NOTIFY_PRINT_PROCESSOR, "Print processor" }, { PRINTER_NOTIFY_PARAMETERS, "Parameters" }, { PRINTER_NOTIFY_DATATYPE, "Datatype" }, { PRINTER_NOTIFY_SECURITY_DESCRIPTOR, "Security descriptor" }, { PRINTER_NOTIFY_ATTRIBUTES, "Attributes" }, { PRINTER_NOTIFY_PRIORITY, "Priority" }, { PRINTER_NOTIFY_DEFAULT_PRIORITY, "Default priority" }, { PRINTER_NOTIFY_START_TIME, "Start time" }, { PRINTER_NOTIFY_UNTIL_TIME, "Until time" }, { PRINTER_NOTIFY_STATUS, "Status" }, { PRINTER_NOTIFY_STATUS_STRING, "Status string" }, { PRINTER_NOTIFY_CJOBS, "Cjobs" }, { PRINTER_NOTIFY_AVERAGE_PPM, "Average PPM" }, { PRINTER_NOTIFY_TOTAL_PAGES, "Total pages" }, { PRINTER_NOTIFY_PAGES_PRINTED, "Pages printed" }, { PRINTER_NOTIFY_TOTAL_BYTES, "Total bytes" }, { PRINTER_NOTIFY_BYTES_PRINTED, "Bytes printed" }, { 0, NULL} }; static value_string_ext printer_notify_option_data_vals_ext = VALUE_STRING_EXT_INIT(printer_notify_option_data_vals); static const value_string job_notify_option_data_vals[] = { { JOB_NOTIFY_PRINTER_NAME, "Printer name" }, { JOB_NOTIFY_MACHINE_NAME, "Machine name" }, { JOB_NOTIFY_PORT_NAME, "Port name" }, { JOB_NOTIFY_USER_NAME, "User name" }, { JOB_NOTIFY_NOTIFY_NAME, "Notify name" }, { JOB_NOTIFY_DATATYPE, "Data type" }, { JOB_NOTIFY_PRINT_PROCESSOR, "Print processor" }, { JOB_NOTIFY_PARAMETERS, "Parameters" }, { JOB_NOTIFY_DRIVER_NAME, "Driver name" }, { JOB_NOTIFY_DEVMODE, "Devmode" }, { JOB_NOTIFY_STATUS, "Status" }, { JOB_NOTIFY_STATUS_STRING, "Status string" }, { JOB_NOTIFY_SECURITY_DESCRIPTOR, "Security descriptor" }, { JOB_NOTIFY_DOCUMENT, "Document" }, { JOB_NOTIFY_PRIORITY, "Priority" }, { JOB_NOTIFY_POSITION, "Position" }, { JOB_NOTIFY_SUBMITTED, "Submitted" }, { JOB_NOTIFY_START_TIME, "Start time" }, { JOB_NOTIFY_UNTIL_TIME, "Until time" }, { JOB_NOTIFY_TIME, "Time" }, { JOB_NOTIFY_TOTAL_PAGES, "Total pages" }, { JOB_NOTIFY_PAGES_PRINTED, "Pages printed" }, { JOB_NOTIFY_TOTAL_BYTES, "Total bytes" }, { JOB_NOTIFY_BYTES_PRINTED, "Bytes printed" }, { 0, NULL} }; static value_string_ext job_notify_option_data_vals_ext = VALUE_STRING_EXT_INIT(job_notify_option_data_vals); static int dissect_notify_field(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, guint16 type, guint16 *data) { guint16 field; const char *str; offset = dissect_ndr_uint16( tvb, offset, pinfo, NULL, di, drep, hf_notify_field, &field); switch(type) { case PRINTER_NOTIFY_TYPE: str = val_to_str_ext_const(field, &printer_notify_option_data_vals_ext, "Unknown"); break; case JOB_NOTIFY_TYPE: str = 
val_to_str_ext_const(field, &job_notify_option_data_vals_ext, "Unknown"); break; default: str = "Unknown notify type"; break; } proto_tree_add_uint_format_value(tree, hf_notify_field, tvb, offset - 2, 2, field, "%s (%d)", str, field); if (data) *data = field; return offset; } static int dissect_NOTIFY_OPTION_DATA(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 count, i; guint16 type; if (di->conformant_run) return offset; offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_option_data_count, &count); type = GPOINTER_TO_INT(dcv->private_data); for (i = 0; i < count; i++) offset = dissect_notify_field( tvb, offset, pinfo, tree, di, drep, type, NULL); return offset; } static const value_string printer_notify_types[] = { { PRINTER_NOTIFY_TYPE, "Printer notify" }, { JOB_NOTIFY_TYPE, "Job notify" }, { 0, NULL } }; static const char *notify_plural(int count) { if (count == 1) return "notification"; return "notifies"; } static gint ett_NOTIFY_OPTION = -1; static int dissect_NOTIFY_OPTION(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; proto_item *item; proto_tree *subtree; guint16 type; guint32 count; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_NOTIFY_OPTION, &item, "Notify Option"); offset = dissect_ndr_uint16(tvb, offset, pinfo, subtree, di, drep, hf_notify_option_type, &type); proto_item_append_text( item, ": %s", val_to_str(type, printer_notify_types, "Unknown (%d)")); offset = dissect_ndr_uint16(tvb, offset, pinfo, subtree, di, drep, hf_notify_option_reserved1, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_notify_option_reserved2, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_notify_option_reserved3, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_notify_option_count, &count); proto_item_append_text( item, ", %d %s", count, notify_plural(count)); dcv->private_data = GINT_TO_POINTER((int)type); offset = dissect_ndr_pointer( tvb, offset, pinfo, subtree, di, drep, dissect_NOTIFY_OPTION_DATA, NDR_POINTER_UNIQUE, "Notify Option Data", -1); return offset; } static int dissect_NOTIFY_OPTIONS_ARRAY(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Why is a check for di->conformant_run not required here? 
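	   Presumably because dissect_ndr_ucarray() handles the conformant run
	   itself and only hands individual elements to dissect_NOTIFY_OPTION()
	   when there is element data to decode -- not verified.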
*/ offset = dissect_ndr_ucarray( tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_OPTION); return offset; } static gint ett_notify_options_flags = -1; static const true_false_string tfs_notify_options_flags_refresh = { "Data for all monitored fields is present", "Data for all monitored fields not present" }; static int dissect_notify_options_flags(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 flags; static const int * hf_flags[] = { &hf_notify_options_flags_refresh, NULL }; offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep, -1, &flags); proto_tree_add_bitmask_value_with_flags(tree, tvb, offset - 4, hf_notify_options_flags, ett_notify_options_flags, hf_flags, flags, BMT_NO_APPEND); return offset; } static int dissect_NOTIFY_OPTIONS_ARRAY_CTR(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { if (di->conformant_run) return offset; offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_notify_options_version, NULL); offset = dissect_notify_options_flags(tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_notify_options_count, NULL); offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_OPTIONS_ARRAY, NDR_POINTER_UNIQUE, "Notify Options Array", -1); return offset; } /* * SpoolssRFFPCNEX */ static gint ett_rffpcnex_flags = -1; static int hf_rffpcnex_flags = -1; static int hf_rffpcnex_options = -1; static int hf_rffpcnex_flags_add_printer = -1; static int hf_rffpcnex_flags_set_printer = -1; static int hf_rffpcnex_flags_delete_printer = -1; static int hf_rffpcnex_flags_failed_printer_connection = -1; static const true_false_string tfs_rffpcnex_flags_add_printer = { "Notify on add printer", "Don't notify on add printer" }; static const true_false_string tfs_rffpcnex_flags_set_printer = { "Notify on set printer", "Don't notify on set printer" }; static const true_false_string tfs_rffpcnex_flags_delete_printer = { "Notify on delete printer", "Don't notify on delete printer" }; static const true_false_string tfs_rffpcnex_flags_failed_connection_printer = { "Notify on failed printer connection", "Don't notify on failed printer connection" }; static int hf_rffpcnex_flags_add_job = -1; static int hf_rffpcnex_flags_set_job = -1; static int hf_rffpcnex_flags_delete_job = -1; static int hf_rffpcnex_flags_write_job = -1; static const true_false_string tfs_rffpcnex_flags_add_job = { "Notify on add job", "Don't notify on add job" }; static const true_false_string tfs_rffpcnex_flags_set_job = { "Notify on set job", "Don't notify on set job" }; static const true_false_string tfs_rffpcnex_flags_delete_job = { "Notify on delete job", "Don't notify on delete job" }; static const true_false_string tfs_rffpcnex_flags_write_job = { "Notify on writejob", "Don't notify on write job" }; static int hf_rffpcnex_flags_add_form = -1; static int hf_rffpcnex_flags_set_form = -1; static int hf_rffpcnex_flags_delete_form = -1; static const true_false_string tfs_rffpcnex_flags_add_form = { "Notify on add form", "Don't notify on add form" }; static const true_false_string tfs_rffpcnex_flags_set_form = { "Notify on set form", "Don't notify on set form" }; static const true_false_string tfs_rffpcnex_flags_delete_form = { "Notify on delete form", "Don't notify on delete form" }; static int hf_rffpcnex_flags_add_port = -1; static int hf_rffpcnex_flags_configure_port = -1; static int hf_rffpcnex_flags_delete_port = -1; static 
const true_false_string tfs_rffpcnex_flags_add_port = {
	"Notify on add port",
	"Don't notify on add port"
};

static const true_false_string tfs_rffpcnex_flags_configure_port = {
	"Notify on configure port",
	"Don't notify on configure port"
};

static const true_false_string tfs_rffpcnex_flags_delete_port = {
	"Notify on delete port",
	"Don't notify on delete port"
};

static int hf_rffpcnex_flags_add_print_processor = -1;
static int hf_rffpcnex_flags_delete_print_processor = -1;

static const true_false_string tfs_rffpcnex_flags_add_print_processor = {
	"Notify on add print processor",
	"Don't notify on add print processor"
};

static const true_false_string tfs_rffpcnex_flags_delete_print_processor = {
	"Notify on delete print processor",
	"Don't notify on delete print processor"
};

static int hf_rffpcnex_flags_add_driver = -1;
static int hf_rffpcnex_flags_set_driver = -1;
static int hf_rffpcnex_flags_delete_driver = -1;

static const true_false_string tfs_rffpcnex_flags_add_driver = {
	"Notify on add driver",
	"Don't notify on add driver"
};

static const true_false_string tfs_rffpcnex_flags_set_driver = {
	"Notify on set driver",
	"Don't notify on set driver"
};

static const true_false_string tfs_rffpcnex_flags_delete_driver = {
	"Notify on delete driver",
	"Don't notify on delete driver"
};

static int hf_rffpcnex_flags_timeout = -1;

static const true_false_string tfs_rffpcnex_flags_timeout = {
	"Notify on timeout",
	"Don't notify on timeout"
};

static int
SpoolssRFFPCNEX_q(tvbuff_t *tvb, int offset, packet_info *pinfo,
		  proto_tree *tree, dcerpc_info *di, guint8 *drep _U_)
{
	guint32 flags;
	static const int * hf_flags[] = {
		&hf_rffpcnex_flags_timeout,
		&hf_rffpcnex_flags_delete_driver,
		&hf_rffpcnex_flags_set_driver,
		&hf_rffpcnex_flags_add_driver,
		&hf_rffpcnex_flags_delete_print_processor,
		&hf_rffpcnex_flags_add_print_processor,
		&hf_rffpcnex_flags_delete_port,
		&hf_rffpcnex_flags_configure_port,
		&hf_rffpcnex_flags_add_port,
		&hf_rffpcnex_flags_delete_form,
		&hf_rffpcnex_flags_set_form,
		&hf_rffpcnex_flags_add_form,
		&hf_rffpcnex_flags_write_job,
		&hf_rffpcnex_flags_delete_job,
		&hf_rffpcnex_flags_set_job,
		&hf_rffpcnex_flags_add_job,
		&hf_rffpcnex_flags_failed_printer_connection,
		&hf_rffpcnex_flags_delete_printer,
		&hf_rffpcnex_flags_set_printer,
		&hf_rffpcnex_flags_add_printer,
		NULL
	};

	/* Parse packet */

	offset = dissect_nt_policy_hnd(
		tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL,
		FALSE, FALSE);

	offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep,
		-1, &flags);
	proto_tree_add_bitmask_value(tree, tvb, offset - 4, hf_rffpcnex_flags,
		ett_rffpcnex_flags, hf_flags, flags);

	offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep,
		hf_rffpcnex_options, NULL);

	offset = dissect_ndr_str_pointer_item(
		tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE,
		"Server", hf_servername, 0);

	offset = dissect_ndr_uint32(
		tvb, offset, pinfo, tree, di, drep, hf_printerlocal, NULL);

	offset = dissect_ndr_pointer(
		tvb, offset, pinfo, tree, di, drep,
		dissect_NOTIFY_OPTIONS_ARRAY_CTR, NDR_POINTER_UNIQUE,
		"Notify Options Container", -1);

	return offset;
}

static int
SpoolssRFFPCNEX_r(tvbuff_t *tvb, int offset, packet_info *pinfo,
		  proto_tree *tree, dcerpc_info *di, guint8 *drep _U_)
{
	/* Parse packet */

	offset = dissect_doserror(
		tvb, offset, pinfo, tree, di, drep, hf_rc, NULL);

	return offset;
}

/*
 * SpoolssReplyOpenPrinter
 */

static int
SpoolssReplyOpenPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo,
			  proto_tree *tree, dcerpc_info *di, guint8 *drep _U_)
{
	dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data;
	guint32 printerlocal;
	char *name;

	/* Parse
packet */ name=NULL; offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_servername, TRUE, &name); /* ReplyOpenPrinter() stores the printername in se_data */ if(!pinfo->fd->flags.visited){ if(!dcv->se_data){ if(name){ dcv->se_data = wmem_strdup(wmem_file_scope(), name); } } } if (name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printerlocal, &printerlocal); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_replyopenprinter_unk0, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_replyopenprinter_unk1, NULL); return offset; } static int SpoolssReplyOpenPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; e_ctx_hnd policy_hnd; proto_item *hnd_item; guint32 status; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, &hnd_item, TRUE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, &status); if( status == 0 ){ const char *pol_name; if (dcv->se_data){ pol_name = wmem_strdup_printf(wmem_packet_scope(), "ReplyOpenPrinter(%s)", (char *)dcv->se_data); } else { pol_name = "Unknown ReplyOpenPrinter() handle"; } if(!pinfo->fd->flags.visited){ dcerpc_store_polhnd_name(&policy_hnd, pinfo, pol_name); } if(hnd_item) proto_item_append_text(hnd_item, ": %s", pol_name); } return offset; } /* * SpoolssGetPrinter */ static int SpoolssGetPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* GetPrinter() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static gint ett_PRINTER_INFO = -1; static int SpoolssGetPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; BUFFER buffer; gint16 level = GPOINTER_TO_INT(dcv->se_data); proto_item *item = NULL; proto_tree *subtree = NULL; col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); /* Parse packet */ offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, &buffer); if (buffer.tvb) { subtree = proto_tree_add_subtree_format( buffer.tree, buffer.tvb, 0, -1, ett_PRINTER_INFO, &item, "Print info level %d", level); switch(level) { case 0: dissect_PRINTER_INFO_0( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 1: dissect_PRINTER_INFO_1( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 2: dissect_PRINTER_INFO_2( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 3: dissect_PRINTER_INFO_3( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 7: dissect_PRINTER_INFO_7( buffer.tvb, 0, pinfo, subtree, di, drep); break; default: expert_add_info(pinfo, 
item, &ei_printer_info_level); break; } } offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SEC_DESC_BUF */ static gint ett_SEC_DESC_BUF = -1; static int hf_secdescbuf_maxlen = -1; static int hf_secdescbuf_undoc = -1; static int hf_secdescbuf_len = -1; static int dissect_SEC_DESC_BUF(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; guint32 len; /* XXX: I think this is really a array of bytes which can be dissected using dissect_ndr_cvstring(). The dissected data can be passed to dissect_nt_sec_desc(). The problem is that dissect_nt_cvstring() passes back a char * where it really should pass back a tvb. */ subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_SEC_DESC_BUF, NULL, "Security descriptor buffer"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_secdescbuf_maxlen, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_secdescbuf_undoc, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_secdescbuf_len, &len); dissect_nt_sec_desc( tvb, offset, pinfo, subtree, drep, TRUE, len, &spoolss_printer_access_mask_info); offset += len; return offset; } /* * SPOOL_PRINTER_INFO_LEVEL */ static gint ett_SPOOL_PRINTER_INFO_LEVEL = -1; /* spool printer info */ static int hf_spool_printer_info_devmode_ptr = -1; static int hf_spool_printer_info_secdesc_ptr = -1; static int dissect_SPOOL_PRINTER_INFO(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; guint32 level; proto_tree *item; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_SPOOL_PRINTER_INFO_LEVEL, &item, "Spool printer info level"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_level, &level); switch(level) { case 3: { guint32 devmode_ptr, secdesc_ptr; /* I can't seem to get this working with the correct dissect_ndr_pointer() function so let's cheat and dissect the pointers by hand. 
)-: */ offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_spool_printer_info_devmode_ptr, &devmode_ptr); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_spool_printer_info_secdesc_ptr, &secdesc_ptr); if (devmode_ptr) offset = dissect_DEVMODE_CTR( tvb, offset, pinfo, subtree, di, drep); if (secdesc_ptr) offset = dissect_SEC_DESC_BUF( tvb, offset, pinfo, subtree, di, drep); break; } case 2: default: expert_add_info_format(pinfo, item, &ei_spool_printer_info_level, "Unknown spool printer info level %d", level); break; } return offset; } /* * SpoolssSetPrinter */ static int hf_setprinter_cmd = -1; static const value_string setprinter_cmd_vals[] = { { SPOOLSS_PRINTER_CONTROL_UNPAUSE, "Unpause" }, { SPOOLSS_PRINTER_CONTROL_PAUSE, "Pause" }, { SPOOLSS_PRINTER_CONTROL_RESUME, "Resume" }, { SPOOLSS_PRINTER_CONTROL_PURGE, "Purge" }, { SPOOLSS_PRINTER_CONTROL_SET_STATUS, "Set status" }, { 0, NULL } }; static int SpoolssSetPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { guint32 level; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_SPOOL_PRINTER_INFO( tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_setprinter_cmd, NULL); return offset; } static int SpoolssSetPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * FORM_REL */ static const value_string form_type_vals[] = { { SPOOLSS_FORM_USER, "User" }, { SPOOLSS_FORM_BUILTIN, "Builtin" }, { SPOOLSS_FORM_PRINTER, "Printer" }, { 0, NULL } }; static gint ett_FORM_REL = -1; static int dissect_FORM_REL(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, int struct_start) { proto_item *item; proto_tree *subtree; guint32 flags; int item_start = offset; char *name = NULL; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_FORM_REL, &item, "Form"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_flags, &flags); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_form_name, struct_start, &name); if (name) { proto_item_append_text(item, ": %s", name); g_free(name); } offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_width, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_height, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_left_margin, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_top_margin, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_horiz_len, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_vert_len, NULL); proto_item_set_len(item, offset - item_start); return offset; } /* * SpoolssEnumForms */ static int SpoolssEnumForms_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 
1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* EnumForms() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssEnumForms_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; BUFFER buffer; guint32 level = GPOINTER_TO_UINT(dcv->se_data), i, count; int buffer_offset; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, &buffer); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumforms_num, &count); /* Unfortunately this array isn't in NDR format so we can't use prs_array(). The other weird thing is the struct_start being inside the loop rather than outside. Very strange. */ buffer_offset = 0; for (i = 0; i < count; i++) { int struct_start = buffer_offset; buffer_offset = dissect_FORM_REL( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep, struct_start); } offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SpoolssDeletePrinter */ static int SpoolssDeletePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); return offset; } static int SpoolssDeletePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } static int SpoolssAddPrinterEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; e_ctx_hnd policy_hnd; proto_item *hnd_item; guint32 status; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, &hnd_item, TRUE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, &status); if( status == 0 ){ const char *pol_name; if (dcv->se_data){ pol_name = wmem_strdup_printf(wmem_packet_scope(), "AddPrinterEx(%s)", (char *)dcv->se_data); } else { pol_name = "Unknown AddPrinterEx() handle"; } if(!pinfo->fd->flags.visited){ dcerpc_store_polhnd_name(&policy_hnd, pinfo, pol_name); } if(hnd_item) proto_item_append_text(hnd_item, ": %s", pol_name); } return offset; } /* * SpoolssEnumPrinterData */ static int hf_enumprinterdata_enumindex = -1; static int hf_enumprinterdata_value_offered = -1; static int hf_enumprinterdata_data_offered = -1; static int hf_enumprinterdata_value_len = -1; 
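/* The _offered fields are the value/data buffer sizes supplied by the
   client in the EnumPrinterData request; value_len, value_needed and
   data_needed are returned in the reply (see the _q/_r routines below). */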
static int hf_enumprinterdata_value_needed = -1; static int hf_enumprinterdata_data_needed = -1; static int SpoolssEnumPrinterData_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { guint32 ndx; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumprinterdata_enumindex, &ndx); col_append_fstr(pinfo->cinfo, COL_INFO, ", index %d", ndx); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumprinterdata_value_offered, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumprinterdata_data_offered, NULL); return offset; } static int SpoolssEnumPrinterData_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { guint32 value_len, type; char *value; proto_item *value_item; proto_tree *value_subtree; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ value_subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_printerdata_value, &value_item, "Value"); offset = dissect_ndr_uint32( tvb, offset, pinfo, value_subtree, di, drep, hf_enumprinterdata_value_len, &value_len); if (value_len) { dissect_spoolss_uint16uni( tvb, offset, pinfo, value_subtree, drep, &value, hf_value_name); offset += value_len * 2; if (value && value[0]) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", value); proto_item_append_text(value_item, ": %s", value); hidden_item = proto_tree_add_string( tree, hf_printerdata_value, tvb, offset, 0, value); PROTO_ITEM_SET_HIDDEN(hidden_item); g_free(value); } proto_item_set_len(value_item, value_len * 2 + 4); offset = dissect_ndr_uint32( tvb, offset, pinfo, value_subtree, di, drep, hf_enumprinterdata_value_needed, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printerdata_type, &type); offset = dissect_printerdata_data( tvb, offset, pinfo, tree, di, drep, type); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumprinterdata_data_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SpoolssEnumPrinters */ static gint ett_enumprinters_flags = -1; static int hf_enumprinters_flags = -1; static int hf_enumprinters_flags_local = -1; static int hf_enumprinters_flags_name = -1; static int hf_enumprinters_flags_shared = -1; static int hf_enumprinters_flags_default = -1; static int hf_enumprinters_flags_connections = -1; static int hf_enumprinters_flags_network = -1; static int hf_enumprinters_flags_remote = -1; static int SpoolssEnumPrinters_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { guint32 level, flags; dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; static const int * hf_flags[] = { &hf_enumprinters_flags_network, &hf_enumprinters_flags_shared, &hf_enumprinters_flags_remote, &hf_enumprinters_flags_name, &hf_enumprinters_flags_connections, &hf_enumprinters_flags_local, &hf_enumprinters_flags_default, NULL }; /* Parse packet */ offset = dissect_ndr_uint32(tvb, offset, pinfo, NULL, di, drep, -1, &flags); proto_tree_add_bitmask_value(tree, tvb, offset - 4, hf_enumprinters_flags, 
ett_enumprinters_flags, hf_flags, flags); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Server name", hf_servername, 0); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* GetPrinter() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssEnumPrinters_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { guint32 num_drivers; dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; gint16 level = GPOINTER_TO_INT(dcv->se_data); BUFFER buffer; proto_item *item; proto_tree *subtree = NULL; col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); /* Parse packet */ offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, &buffer); if (buffer.tvb) { subtree = proto_tree_add_subtree_format( buffer.tree, buffer.tvb, 0, -1, ett_PRINTER_INFO, &item, "Print info level %d", level); switch(level) { case 0: dissect_PRINTER_INFO_0( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 1: dissect_PRINTER_INFO_1( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 2: dissect_PRINTER_INFO_2( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 3: dissect_PRINTER_INFO_3( buffer.tvb, 0, pinfo, subtree, di, drep); break; case 7: dissect_PRINTER_INFO_7( buffer.tvb, 0, pinfo, subtree, di, drep); break; default: expert_add_info(pinfo, item, &ei_printer_info_level); break; } } offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_returned, &num_drivers); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * AddPrinterDriver */ static int SpoolssAddPrinterDriver_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * FORM_1 */ static gint ett_FORM_1 = -1; static int dissect_FORM_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; guint32 flags; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_FORM_1, NULL, "Form level 1"); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, subtree, di, drep, NDR_POINTER_UNIQUE, "Name", hf_form_name, 0); /* Eek - we need to know whether this pointer was NULL or not. Currently there is not any way to do this. 
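	   The length check just below at least stops us from dissecting past
	   the end of the buffer when nothing follows the name pointer.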
*/ if (tvb_reported_length_remaining(tvb, offset) <= 0) goto done; offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_flags, &flags); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_unknown, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_width, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_height, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_left_margin, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_top_margin, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_horiz_len, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_vert_len, NULL); done: return offset; } /* * FORM_CTR */ static gint ett_FORM_CTR = -1; static int dissect_FORM_CTR(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; proto_item *item; guint32 level; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_FORM_CTR, &item, "Form container"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_form_level, &level); switch(level) { case 1: offset = dissect_FORM_1(tvb, offset, pinfo, subtree, di, drep); break; default: expert_add_info_format(pinfo, item, &ei_form_level, "Unknown form info level %d", level); break; } return offset; } /* * AddForm */ static int SpoolssAddForm_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_form_level, &level); col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); /* AddForm() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } offset = dissect_FORM_CTR(tvb, offset, pinfo, tree, di, drep); return offset; } static int SpoolssAddForm_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * DeleteForm */ static int SpoolssDeleteForm_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; char *name = NULL; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_form_name, TRUE, &name); if (name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", name); return offset; } static int SpoolssDeleteForm_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* 
Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SetForm */ static int SpoolssSetForm_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { char *name = NULL; guint32 level; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_form_name, TRUE, &name); if (name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_form_level, &level); col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_FORM_CTR(tvb, offset, pinfo, tree, di, drep); return offset; } static int SpoolssSetForm_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * GetForm */ static int SpoolssGetForm_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; proto_item *hidden_item; guint32 level; char *name; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_form_name, TRUE, &name); col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_form_level, &level); /* GetForm() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssGetForm_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; BUFFER buffer; guint32 level = GPOINTER_TO_UINT(dcv->se_data); proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_form, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, &buffer); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); if (buffer.tvb) { int buffer_offset = 0; switch(level) { case 1: { int struct_start = buffer_offset; /*buffer_offset = */dissect_FORM_REL( buffer.tvb, buffer_offset, pinfo, tree, di, drep, struct_start); break; } default: proto_tree_add_expert_format(buffer.tree, pinfo, &ei_form_level, buffer.tvb, buffer_offset, -1, "Unknown form info level %d", level); break; } } offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* A generic reply function that just 
parses the status code. Useful for unimplemented dissectors so the status code can be inserted into the INFO column. */ static int SpoolssGeneric_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { int len = tvb_reported_length(tvb); proto_tree_add_expert(tree, pinfo, &ei_unimplemented_dissector, tvb, offset, 0); offset = dissect_doserror( tvb, len - 4, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * JOB_INFO_1 */ static gint ett_JOB_INFO_1 = -1; static int dissect_spoolss_JOB_INFO_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_item *item; proto_tree *subtree; int struct_start = offset; char *document_name; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_JOB_INFO_1, &item, "Job info level 1"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_id, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_printername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_servername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_username, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_documentname, struct_start, &document_name); proto_item_append_text(item, ": %s", document_name); g_free(document_name); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_datatype, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_textstatus, struct_start, NULL); offset = dissect_job_status(tvb, offset, pinfo, subtree, di, drep); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_priority, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_position, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_totalpages, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_pagesprinted, NULL); offset = dissect_SYSTEM_TIME( tvb, offset, pinfo, subtree, di, drep, "Job Submission Time", TRUE, NULL); proto_item_set_len(item, offset - struct_start); return offset; } /* * JOB_INFO_2 */ static gint ett_JOB_INFO_2 = -1; static int dissect_spoolss_JOB_INFO_2(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_item *item; proto_tree *subtree; int struct_start = offset; char *document_name; guint32 devmode_offset, secdesc_offset; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_JOB_INFO_2, &item, "Job info level 2"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_job_id, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_printername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_machinename, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_username, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_documentname, struct_start, &document_name); proto_item_append_text(item, ": %s", document_name); g_free(document_name); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_notifyname, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_datatype, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, 
subtree, di, drep, hf_printprocessor, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_parameters, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_offset, &devmode_offset); dissect_DEVMODE( tvb, devmode_offset - 4 + struct_start, pinfo, subtree, di, drep); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_textstatus, struct_start, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_offset, &secdesc_offset); dissect_nt_sec_desc( tvb, secdesc_offset, pinfo, subtree, drep, FALSE, -1, &spoolss_job_access_mask_info); offset = dissect_job_status(tvb, offset, pinfo, subtree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_job_priority, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_job_position, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_start_time, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_end_time, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_job_totalpages, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_job_size, NULL); offset = dissect_SYSTEM_TIME( tvb, offset, pinfo, subtree, di, drep, "Job Submission Time", TRUE, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_elapsed_time, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_job_pagesprinted, NULL); proto_item_set_len(item, offset - struct_start); return offset; } /* * EnumJobs */ static int hf_enumjobs_firstjob = -1; static int hf_enumjobs_numjobs = -1; static int SpoolssEnumJobs_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumjobs_firstjob, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumjobs_numjobs, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* EnumJobs() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssEnumJobs_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; gint16 level = GPOINTER_TO_UINT(dcv->se_data); BUFFER buffer; guint32 num_jobs, i; int buffer_offset; /* Parse packet */ offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, &buffer); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_enumjobs_numjobs, &num_jobs); buffer_offset = 0; for (i = 0; i < num_jobs; i++) { switch(level) { case 1: buffer_offset = dissect_spoolss_JOB_INFO_1( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 2: 
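			/* Job info level 2 additionally carries a devmode and a
			   security descriptor; see dissect_spoolss_JOB_INFO_2 above. */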
buffer_offset = dissect_spoolss_JOB_INFO_2( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; default: proto_tree_add_expert_format( buffer.tree, pinfo, &ei_job_info_level, buffer.tvb, 0, -1, "Unknown job info level %d", level); break; } } offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * SetJob */ static const value_string setjob_commands[] = { { JOB_CONTROL_PAUSE, "Pause" }, { JOB_CONTROL_RESUME, "Resume" }, { JOB_CONTROL_CANCEL, "Cancel" }, { JOB_CONTROL_RESTART, "Restart" }, { JOB_CONTROL_DELETE, "Delete" }, { 0, NULL } }; static int hf_setjob_cmd = -1; static int SpoolssSetJob_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 jobid, cmd; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_job_id, &jobid); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_setjob_cmd, &cmd); col_append_fstr( pinfo->cinfo, COL_INFO, ", %s jobid %d", val_to_str(cmd, setjob_commands, "Unknown (%d)"), jobid); return offset; } static int SpoolssSetJob_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * GetJob */ static int SpoolssGetJob_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level, jobid; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_job_id, &jobid); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* GetJob() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d, jobid %d", level, jobid); offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssGetJob_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; gint32 level = GPOINTER_TO_UINT(dcv->se_data); BUFFER buffer; /* Parse packet */ offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, &buffer); if (buffer.tvb) { int buffer_offset = 0; switch(level) { case 1: /*buffer_offset = */dissect_spoolss_JOB_INFO_1( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 2: default: proto_tree_add_expert_format( buffer.tree, pinfo, &ei_job_info_level, buffer.tvb, buffer_offset, -1, "Unknown job info level %d", level); break; } } offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * StartPagePrinter */ static int SpoolssStartPagePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, 
drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); return offset; } static int SpoolssStartPagePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * EndPagePrinter */ static int SpoolssEndPagePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); return offset; } static int SpoolssEndPagePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * DOC_INFO_1 */ static gint ett_DOC_INFO_1 = -1; static int dissect_spoolss_doc_info_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DOC_INFO_1, NULL, "Document info level 1"); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, subtree, di, drep, NDR_POINTER_UNIQUE, "Document name", hf_documentname, 0); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, subtree, di, drep, NDR_POINTER_UNIQUE, "Output file", hf_outputfile, 0); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, subtree, di, drep, NDR_POINTER_UNIQUE, "Data type", hf_datatype, 0); return offset; } static int dissect_spoolss_doc_info_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { if (di->conformant_run) return offset; return dissect_spoolss_doc_info_1(tvb, offset, pinfo, tree, di, drep); } /* * DOC_INFO */ static gint ett_DOC_INFO = -1; static int dissect_spoolss_doc_info(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; guint32 level; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DOC_INFO, NULL, "Document info"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_level, &level); offset = dissect_ndr_pointer( tvb, offset, pinfo, subtree, di, drep, dissect_spoolss_doc_info_data, NDR_POINTER_UNIQUE, "Document info", -1); return offset; } /* * DOC_INFO_CTR */ static gint ett_DOC_INFO_CTR = -1; static int dissect_spoolss_doc_info_ctr(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DOC_INFO_CTR, NULL, "Document info container"); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_level, NULL); offset = dissect_spoolss_doc_info( tvb, offset, pinfo, subtree, di, drep); return offset; } /* * StartDocPrinter */ static int SpoolssStartDocPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, 
FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); offset = dissect_spoolss_doc_info_ctr(tvb, offset, pinfo, tree, di, drep); return offset; } static int SpoolssStartDocPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_job_id, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * EndDocPrinter */ static int SpoolssEndDocPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); return offset; } static int SpoolssEndDocPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * WritePrinter */ static gint ett_writeprinter_buffer = -1; static int hf_writeprinter_numwritten = -1; static int SpoolssWritePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; guint32 size; proto_item *item; proto_tree *subtree; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_buffer_size, &size); col_append_fstr(pinfo->cinfo, COL_INFO, ", %d bytes", size); subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_writeprinter_buffer, &item, "Buffer"); offset = dissect_ndr_uint8s(tvb, offset, pinfo, subtree, di, drep, hf_buffer_data, size, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_buffer_size, NULL); proto_item_set_len(item, size + 4); return offset; } static int SpoolssWritePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 size; /* Parse packet */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_writeprinter_numwritten, &size); col_append_fstr( pinfo->cinfo, COL_INFO, ", %d bytes written", size); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * DeletePrinterData */ static int SpoolssDeletePrinterData_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { char *value_name; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_value, TRUE, &value_name); col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", value_name); return offset; } static int SpoolssDeletePrinterData_r(tvbuff_t *tvb, int 
offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * DRIVER_INFO_1 */ static gint ett_DRIVER_INFO_1 = -1; static int dissect_DRIVER_INFO_1(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; int struct_start = offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DRIVER_INFO_1, NULL, "Driver info level 1"); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); return offset; } /* * DRIVER_INFO_2 */ static const value_string driverinfo_cversion_vals[] = { { 0, "Windows 95/98/Me" }, { 2, "Windows NT 4.0" }, { 3, "Windows 2000/XP" }, { 0, NULL } }; static gint ett_DRIVER_INFO_2 = -1; static int dissect_DRIVER_INFO_2(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; int struct_start = offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DRIVER_INFO_2, NULL, "Driver info level 2"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_driverinfo_cversion, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_environment, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_driverpath, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_datafile, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_configfile, struct_start, NULL); return offset; } /* * DRIVER_INFO_3 */ static gint ett_DRIVER_INFO_3 = -1; static int dissect_DRIVER_INFO_3(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; int struct_start = offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DRIVER_INFO_3, NULL, "Driver info level 3"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_driverinfo_cversion, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_environment, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_driverpath, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_datafile, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_configfile, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_helpfile, struct_start, NULL); offset = dissect_spoolss_relstrarray( tvb, offset, pinfo, subtree, di, drep, hf_dependentfiles, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_monitorname, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_defaultdatatype, struct_start, NULL); return offset; } /* DRIVER_INFO_6 */ static gint ett_DRIVER_INFO_6 = -1; static int dissect_DRIVER_INFO_6(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, 
guint8 *drep) { proto_tree *subtree; int struct_start = offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DRIVER_INFO_6, NULL, "Driver info level 6"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_driverinfo_cversion, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_environment, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_driverpath, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_datafile, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_configfile, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_helpfile, struct_start, NULL); offset = dissect_spoolss_relstrarray( tvb, offset, pinfo, subtree, di, drep, hf_dependentfiles, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_monitorname, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_defaultdatatype, struct_start, NULL); offset = dissect_spoolss_relstrarray( tvb, offset, pinfo, subtree, di, drep, hf_previousdrivernames, struct_start, NULL); offset = dissect_ndr_nt_NTTIME ( tvb, offset, pinfo, subtree, di, drep,hf_driverdate); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_padding, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_driver_version_low, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_driver_version_high, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_mfgname, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_oemurl, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_hardwareid, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_provider, struct_start, NULL); return offset; } static gint ett_DRIVER_INFO_101 = -1; static int dissect_DRIVER_INFO_101(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_tree *subtree; int struct_start = offset; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_DRIVER_INFO_101, NULL, "Driver info level 101"); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_driverinfo_cversion, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_drivername, struct_start, NULL); offset = dissect_spoolss_relstr( tvb, offset, pinfo, subtree, di, drep, hf_environment, struct_start, NULL); proto_tree_add_expert(subtree, pinfo, &ei_unknown_data, tvb, offset, 0); return offset; } /* * EnumPrinterDrivers */ static int SpoolssEnumPrinterDrivers_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level; /* Parse packet */ offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Name", hf_servername, 0); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Environment", hf_environment, 0); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* EnumPrinterDrivers() stores the level 
in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssEnumPrinterDrivers_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level = GPOINTER_TO_UINT(dcv->se_data), num_drivers, i; int buffer_offset; BUFFER buffer; /* Parse packet */ offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, &buffer); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_returned, &num_drivers); buffer_offset = 0; for (i = 0; i < num_drivers; i++) { switch(level) { case 1: buffer_offset = dissect_DRIVER_INFO_1( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 2: buffer_offset = dissect_DRIVER_INFO_2( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 3: buffer_offset = dissect_DRIVER_INFO_3( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 6: buffer_offset = dissect_DRIVER_INFO_6( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); break; case 101: /*buffer_offset =*/ dissect_DRIVER_INFO_101( buffer.tvb, buffer_offset, pinfo, buffer.tree, di, drep); /*break;*/ goto done; /* Not entirely implemented */ default: proto_tree_add_expert_format( buffer.tree, pinfo, &ei_driver_info_level, buffer.tvb, buffer_offset, -1, "Unknown driver info level %d", level); goto done; } } done: offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * GetPrinterDriver2 */ static int SpoolssGetPrinterDriver2_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; e_ctx_hnd policy_hnd; char *pol_name; guint32 level; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Environment", hf_environment, 0); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); /* GetPrinterDriver2() stores the level in se_data */ if(!pinfo->fd->flags.visited){ dcv->se_data = GUINT_TO_POINTER((int)level); } col_append_fstr(pinfo->cinfo, COL_INFO, ", level %d", level); offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_clientmajorversion, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_clientminorversion, NULL); return offset; } static int SpoolssGetPrinterDriver2_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; guint32 level = GPOINTER_TO_UINT(dcv->se_data); BUFFER buffer; /* Parse packet */ offset = dissect_spoolss_buffer(tvb, offset, pinfo, tree, di, drep, &buffer); if (buffer.tvb) {
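/* The response buffer carries a DRIVER_INFO_n structure; pick the decoder from the info level remembered from the matching request (stored in dcv->se_data by SpoolssGetPrinterDriver2_q above). */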
switch(level) { case 1: dissect_DRIVER_INFO_1( buffer.tvb, 0, pinfo, buffer.tree, di, drep); break; case 2: dissect_DRIVER_INFO_2( buffer.tvb, 0, pinfo, buffer.tree, di, drep); break; case 3: dissect_DRIVER_INFO_3( buffer.tvb, 0, pinfo, buffer.tree, di, drep); break; case 6: dissect_DRIVER_INFO_6( buffer.tvb, 0, pinfo, buffer.tree, di, drep); break; case 101: dissect_DRIVER_INFO_101( buffer.tvb, 0, pinfo, buffer.tree, di, drep); break; default: proto_tree_add_expert_format( buffer.tree, pinfo, &ei_driver_info_level, buffer.tvb, 0, -1, "Unknown driver info level %d", level); break; } } offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_servermajorversion, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_serverminorversion, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } static int dissect_notify_info_data_buffer(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 len; offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_buffer_len, &len); offset = dissect_ndr_uint16s( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_buffer_data, len); return offset; } static void cb_notify_str_postprocess(packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item, dcerpc_info *di _U_, tvbuff_t *tvb, int start_offset, int end_offset, void *callback_args) { gint levels, hf_index = GPOINTER_TO_INT(callback_args); guint32 len; char *s; proto_item *hidden_item; /* Align start_offset on 4-byte boundary. */ if (start_offset % 4) start_offset += 4 - (start_offset % 4); /* Get string length */ len = tvb_get_letohl(tvb, start_offset); s = tvb_get_string_enc(NULL, tvb, start_offset + 4, (end_offset - start_offset - 4), ENC_UTF_16|ENC_LITTLE_ENDIAN); /* Append string to upper-level proto_items */ levels = 2; if (levels > 0 && item && s && s[0]) { proto_item_append_text(item, ": %s", s); item = item->parent; levels--; if (levels > 0) { proto_item_append_text(item, ": %s", s); item = item->parent; levels--; while (levels > 0) { proto_item_append_text(item, " %s", s); item = item->parent; levels--; } } } /* Add hidden field so filter brings up any notify data */ if (hf_index != -1) { hidden_item = proto_tree_add_string( tree, hf_index, tvb, start_offset, len, s); PROTO_ITEM_SET_HIDDEN(hidden_item); } g_free(s); } /* Return the hf_index for a printer notify field. This is used to add a hidden string to the display so that filtering will bring up relevant notify data. 
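   For example, a PRINTER_NOTIFY_PRINTER_NAME notification also gets a hidden spoolss.printername string item (added by cb_notify_str_postprocess), so a display filter on spoolss.printername matches the change-notification PDUs as well as the calls that name the printer explicitly.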
*/ static int printer_notify_hf_index(int field) { int result = -1; switch(field) { case PRINTER_NOTIFY_SERVER_NAME: result = hf_servername; break; case PRINTER_NOTIFY_PRINTER_NAME: result = hf_printername; break; case PRINTER_NOTIFY_SHARE_NAME: result = hf_sharename; break; case PRINTER_NOTIFY_PORT_NAME: result = hf_portname; break; case PRINTER_NOTIFY_DRIVER_NAME: result = hf_drivername; break; case PRINTER_NOTIFY_COMMENT: result = hf_printercomment; break; case PRINTER_NOTIFY_LOCATION: result = hf_printerlocation; break; case PRINTER_NOTIFY_SEPFILE: result = hf_sepfile; break; case PRINTER_NOTIFY_PRINT_PROCESSOR: result = hf_printprocessor; break; case PRINTER_NOTIFY_PARAMETERS: result = hf_parameters; break; case PRINTER_NOTIFY_DATATYPE: result = hf_datatype; break; } return result; } static int job_notify_hf_index(int field) { int result = -1; switch(field) { case JOB_NOTIFY_PRINTER_NAME: result = hf_printername; break; case JOB_NOTIFY_MACHINE_NAME: result = hf_machinename; break; case JOB_NOTIFY_PORT_NAME: result = hf_portname; break; case JOB_NOTIFY_USER_NAME: result = hf_username; break; case JOB_NOTIFY_NOTIFY_NAME: result = hf_notifyname; break; case JOB_NOTIFY_DATATYPE: result = hf_datatype; break; case JOB_NOTIFY_PRINT_PROCESSOR: result = hf_printprocessor; break; case JOB_NOTIFY_DRIVER_NAME: result = hf_drivername; break; case JOB_NOTIFY_DOCUMENT: result = hf_documentname; break; case JOB_NOTIFY_PRIORITY: result = hf_job_priority; break; case JOB_NOTIFY_POSITION: result = hf_job_position; break; case JOB_NOTIFY_TOTAL_PAGES: result = hf_job_totalpages; break; case JOB_NOTIFY_PAGES_PRINTED: result = hf_job_pagesprinted; break; case JOB_NOTIFY_TOTAL_BYTES: result = hf_job_totalbytes; break; case JOB_NOTIFY_BYTES_PRINTED: result = hf_job_bytesprinted; break; } return result; } static int dissect_NOTIFY_INFO_DATA_printer(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, proto_item *item, dcerpc_info *di, guint8 *drep, guint16 field) { guint32 value1; switch (field) { /* String notify data */ case PRINTER_NOTIFY_SERVER_NAME: case PRINTER_NOTIFY_PRINTER_NAME: case PRINTER_NOTIFY_SHARE_NAME: case PRINTER_NOTIFY_DRIVER_NAME: case PRINTER_NOTIFY_COMMENT: case PRINTER_NOTIFY_LOCATION: case PRINTER_NOTIFY_SEPFILE: case PRINTER_NOTIFY_PRINT_PROCESSOR: case PRINTER_NOTIFY_PARAMETERS: case PRINTER_NOTIFY_DATATYPE: case PRINTER_NOTIFY_PORT_NAME: offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_bufsize, &value1); offset = dissect_ndr_pointer_cb( tvb, offset, pinfo, tree, di, drep, dissect_notify_info_data_buffer, NDR_POINTER_UNIQUE, "String", hf_notify_info_data_buffer, cb_notify_str_postprocess, GINT_TO_POINTER(printer_notify_hf_index(field))); break; case PRINTER_NOTIFY_ATTRIBUTES: /* Value 1 is the printer attributes */ offset = dissect_printer_attributes( tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_notify_info_data_value2, NULL); break; case PRINTER_NOTIFY_STATUS: { guint32 status; /* Value 1 is the printer status */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_printer_status, &status); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_notify_info_data_value2, NULL); proto_item_append_text( item, ": %s", val_to_str_ext_const(status, &printer_status_vals_ext, "Unknown")); break; } /* Unknown notify data */ case PRINTER_NOTIFY_SECURITY_DESCRIPTOR: /* Secdesc */ case PRINTER_NOTIFY_DEVMODE: /* Devicemode */ offset = dissect_ndr_uint32( tvb,
offset, pinfo, tree, di, drep, hf_notify_info_data_bufsize, &value1); offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_notify_info_data_buffer, NDR_POINTER_UNIQUE, "Buffer", hf_notify_info_data_buffer); break; default: offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value1, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value2, NULL); break; } return offset; } static void notify_job_time_cb(packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item, dcerpc_info *di, tvbuff_t *tvb _U_, int start_offset _U_, int end_offset _U_, void *callback_args _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; char *str = (char *)dcv->private_data; /* Append job string stored in dcv->private_data by dissect_SYSTEM_TIME_ptr() in the current item as well as the parent. */ proto_item_append_text(item, ": %s", str); if (item) proto_item_append_text(item->parent, ": %s", str); } static int dissect_NOTIFY_INFO_DATA_job(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, proto_item *item, dcerpc_info *di, guint8 *drep, guint16 field) { guint32 value1; proto_item *hidden_item; switch (field) { /* String notify data */ case JOB_NOTIFY_PRINTER_NAME: case JOB_NOTIFY_MACHINE_NAME: case JOB_NOTIFY_PORT_NAME: case JOB_NOTIFY_USER_NAME: case JOB_NOTIFY_NOTIFY_NAME: case JOB_NOTIFY_DATATYPE: case JOB_NOTIFY_PRINT_PROCESSOR: case JOB_NOTIFY_PARAMETERS: case JOB_NOTIFY_DRIVER_NAME: case JOB_NOTIFY_STATUS_STRING: case JOB_NOTIFY_DOCUMENT: offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_bufsize, &value1); offset = dissect_ndr_pointer_cb( tvb, offset, pinfo, tree, di, drep, dissect_notify_info_data_buffer, NDR_POINTER_UNIQUE, "String", hf_notify_info_data_buffer, cb_notify_str_postprocess, GINT_TO_POINTER(job_notify_hf_index(field))); break; case JOB_NOTIFY_STATUS: offset = dissect_job_status( tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_notify_info_data_value2, NULL); break; case JOB_NOTIFY_SUBMITTED: /* SYSTEM_TIME */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_buffer_len, NULL); offset = dissect_ndr_pointer_cb( tvb, offset, pinfo, tree, di, drep, dissect_SYSTEM_TIME_ptr, NDR_POINTER_UNIQUE, "Time submitted", -1, notify_job_time_cb, NULL); break; case JOB_NOTIFY_PRIORITY: case JOB_NOTIFY_POSITION: case JOB_NOTIFY_TOTAL_PAGES: case JOB_NOTIFY_PAGES_PRINTED: case JOB_NOTIFY_TOTAL_BYTES: case JOB_NOTIFY_BYTES_PRINTED: { guint32 value; offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value1, &value); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value2, NULL); proto_item_append_text(item, ": %d", value); hidden_item = proto_tree_add_uint( tree, job_notify_hf_index(field), tvb, offset, 4, value); PROTO_ITEM_SET_HIDDEN(hidden_item); break; } /* Unknown notify data */ case JOB_NOTIFY_DEVMODE: offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_bufsize, &value1); offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_notify_info_data_buffer, NDR_POINTER_UNIQUE, "Buffer", hf_notify_info_data_buffer); break; default: offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value1, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_notify_info_data_value2, NULL); } return offset; } 
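/*
 * NOTIFY_INFO_DATA
 *
 * Informal sketch of the layout as read by dissect_NOTIFY_INFO_DATA()
 * below (labels follow this dissector's hf_ entries; see the MS-RPRN
 * specification for the authoritative definition):
 *
 *   uint16 type;      PRINTER_NOTIFY_TYPE or JOB_NOTIFY_TYPE
 *   uint16 field;     which printer/job attribute the entry describes
 *   uint32 count;
 *   uint32 id;
 *   uint32 count;
 *   ...               value data, decoded per (type, field) by the
 *                     _printer/_job helpers above: either a pair of
 *                     32-bit values, or a buffer length followed by a
 *                     pointer to string/buffer data
 */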
static gint ett_NOTIFY_INFO_DATA = -1; static int dissect_NOTIFY_INFO_DATA(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { proto_item *item; proto_tree *subtree; guint32 count; guint16 type, field; const char *field_string; subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_NOTIFY_INFO_DATA, &item, ""); offset = dissect_ndr_uint16( tvb, offset, pinfo, subtree, di, drep, hf_notify_info_data_type, &type); offset = dissect_notify_field( tvb, offset, pinfo, subtree, di, drep, type, &field); switch(type) { case PRINTER_NOTIFY_TYPE: field_string = val_to_str_ext( field, &printer_notify_option_data_vals_ext, "Unknown (%d)"); break; case JOB_NOTIFY_TYPE: field_string = val_to_str_ext( field, &job_notify_option_data_vals_ext, "Unknown (%d)"); break; default: field_string = "Unknown field"; break; } proto_item_append_text( item, "%s, %s", val_to_str(type, printer_notify_types, "Unknown (%d)"), field_string); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_notify_info_data_count, &count); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_notify_info_data_id, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_notify_info_data_count, NULL); /* The value here depends on (type, field) */ switch (type) { case PRINTER_NOTIFY_TYPE: offset = dissect_NOTIFY_INFO_DATA_printer( tvb, offset, pinfo, subtree, item, di, drep, field); break; case JOB_NOTIFY_TYPE: offset = dissect_NOTIFY_INFO_DATA_job( tvb, offset, pinfo, subtree, item, di, drep, field); break; default: expert_add_info(pinfo, item, &ei_notify_info_data_type); break; } return offset; } static int dissect_NOTIFY_INFO(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 count; offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_notify_info_version, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_notify_info_flags, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_notify_info_count, &count); if (!di->conformant_run) col_append_fstr( pinfo->cinfo, COL_INFO, ", %d %s", count, notify_plural(count)); offset = dissect_ndr_ucarray(tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_INFO_DATA); return offset; } /* * RFNPCNEX */ static int SpoolssRFNPCNEX_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 changeid; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_changelow, &changeid); col_append_fstr( pinfo->cinfo, COL_INFO, ", changeid %d", changeid); offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_OPTIONS_ARRAY_CTR, NDR_POINTER_UNIQUE, "Notify Options Array Container", -1); return offset; } static int SpoolssRFNPCNEX_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_INFO, NDR_POINTER_UNIQUE, "Notify Info", -1); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * RRPCN */ static int SpoolssRRPCN_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 changeid; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, 
drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_changelow, &changeid); col_append_fstr( pinfo->cinfo, COL_INFO, ", changeid %d", changeid); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_changehigh, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_unk0, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_unk1, NULL); offset = dissect_ndr_pointer( tvb, offset, pinfo, tree, di, drep, dissect_NOTIFY_INFO, NDR_POINTER_UNIQUE, "Notify Info", -1); /* Notify info */ return offset; } static int SpoolssRRPCN_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_rrpcn_unk0, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * ReplyClosePrinter */ static int SpoolssReplyClosePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, TRUE); return offset; } static int SpoolssReplyClosePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * FCPN */ static int SpoolssFCPN_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); return offset; } static int SpoolssFCPN_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * RouterReplyPrinter */ static int hf_routerreplyprinter_condition = -1; static int hf_routerreplyprinter_unknown1 = -1; static int hf_routerreplyprinter_changeid = -1; static int SpoolssRouterReplyPrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_routerreplyprinter_condition, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_routerreplyprinter_unknown1, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_routerreplyprinter_changeid, NULL); return offset; } static int SpoolssRouterReplyPrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } static int hf_keybuffer_size = -1; static int dissect_spoolss_keybuffer(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 size; int end_offset; if (di->conformant_run) return offset; /* Dissect size and data */ offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_keybuffer_size, &size); end_offset = offset + (size*2); if (end_offset < offset) { /* 
* Overflow - make the end offset one past the end of * the packet data, so we throw an exception (as the * size is almost certainly too big). */ end_offset = tvb_reported_length_remaining(tvb, offset) + 1; } while (offset < end_offset) offset = dissect_spoolss_uint16uni( tvb, offset, pinfo, tree, drep, NULL, hf_keybuffer); return offset; } static int SpoolssEnumPrinterKey_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { char *key_name; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_key, TRUE, &key_name); if (!key_name[0]) key_name = "\"\""; col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", key_name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); return offset; } static int SpoolssEnumPrinterKey_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_spoolss_keybuffer(tvb, offset, pinfo, tree, di, drep); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } static int hf_enumprinterdataex_name_offset = -1; static int hf_enumprinterdataex_name_len = -1; static int hf_enumprinterdataex_name = -1; static int hf_enumprinterdataex_val_offset = -1; static int hf_enumprinterdataex_val_len = -1; static int hf_enumprinterdataex_val_dword_low = -1; static int hf_enumprinterdataex_val_dword_high = -1; static int hf_enumprinterdataex_value_null = -1; static int hf_enumprinterdataex_value_uint = -1; static int hf_enumprinterdataex_value_binary = -1; static int hf_enumprinterdataex_value_multi_sz = -1; static int SpoolssEnumPrinterDataEx_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { char *key_name; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL, FALSE, FALSE); offset = dissect_ndr_cvstring( tvb, offset, pinfo, tree, di, drep, sizeof(guint16), hf_printerdata_key, TRUE, &key_name); col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", key_name); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static gint ett_printer_enumdataex_value = -1; static int dissect_spoolss_printer_enum_values(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 start_offset = offset; guint32 name_offset, name_len, val_offset, val_len, val_type; char *name; proto_item *item; proto_tree *subtree; /* Get offset of value name */ offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_enumprinterdataex_name_offset, &name_offset); offset = dissect_ndr_uint32( tvb, offset, pinfo, NULL, di, drep, hf_enumprinterdataex_name_len, &name_len); dissect_spoolss_uint16uni( tvb, start_offset + name_offset, pinfo, NULL, drep, &name, hf_enumprinterdataex_name); subtree = proto_tree_add_subtree_format(tree, tvb, offset, 0, ett_printer_enumdataex_value, &item, "Name: %s", name); proto_tree_add_uint(subtree, hf_enumprinterdataex_name_offset, tvb, offset - 8, 4, name_offset); proto_tree_add_uint(subtree, hf_enumprinterdataex_name_len, tvb, 
offset - 4, 4, name_len); proto_tree_add_string( subtree, hf_enumprinterdataex_name, tvb, start_offset + name_offset, ((int)strlen(name) + 1) * 2, name); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_printerdata_type, &val_type); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_enumprinterdataex_val_offset, &val_offset); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_enumprinterdataex_val_len, &val_len); if (val_len == 0) { proto_tree_add_uint_format_value(subtree, hf_enumprinterdataex_value_null, tvb, start_offset + val_offset, 4, 0, "(null)"); goto done; } switch(val_type) { case DCERPC_REG_DWORD: { guint32 value; guint16 low, high; int offset2 = start_offset + val_offset; /* Needs to be read as two 16-bit values because it may not be aligned. */ offset2 = dissect_ndr_uint16( tvb, offset2, pinfo, subtree, di, drep, hf_enumprinterdataex_val_dword_low, &low); /*offset2 = */dissect_ndr_uint16( tvb, offset2, pinfo, subtree, di, drep, hf_enumprinterdataex_val_dword_high, &high); value = (high << 16) | low; proto_tree_add_uint(subtree, hf_enumprinterdataex_value_uint, tvb, start_offset + val_offset, 4, value); proto_item_append_text(item, ", Value: %d", value); break; } case DCERPC_REG_SZ: { char *value; dissect_spoolss_uint16uni( tvb, start_offset + val_offset, pinfo, subtree, drep, &value, hf_value_string); proto_item_append_text(item, ", Value: %s", value); g_free(value); break; } case DCERPC_REG_BINARY: /* FIXME: nicer way to display this */ proto_tree_add_bytes_format_value( subtree, hf_enumprinterdataex_value_binary, tvb, start_offset + val_offset, val_len, NULL, "<binary data>"); break; case DCERPC_REG_MULTI_SZ: /* FIXME: implement REG_MULTI_SZ support */ proto_tree_add_bytes_format_value(subtree, hf_enumprinterdataex_value_multi_sz, tvb, start_offset + val_offset, val_len, NULL, "<REG_MULTI_SZ not implemented>"); break; default: proto_tree_add_expert_format( subtree, pinfo, &ei_enumprinterdataex_value, tvb, start_offset + val_offset, val_len, "%s: unknown type %d", name, val_type); } done: g_free(name); return offset; } static gint ett_PRINTER_DATA_CTR = -1; static int SpoolssEnumPrinterDataEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 size, num_values; proto_item *hidden_item; hidden_item = proto_tree_add_uint( tree, hf_printerdata, tvb, offset, 0, 1); PROTO_ITEM_SET_HIDDEN(hidden_item); /* Parse packet */ offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_buffer_size, &size); dissect_ndr_uint32( tvb, offset + size + 4, pinfo, NULL, di, drep, hf_returned, &num_values); if (size) { proto_tree *subtree; int offset2 = offset; guint32 i; subtree = proto_tree_add_subtree( tree, tvb, offset, 0, ett_PRINTER_DATA_CTR, NULL, "Printer data"); for (i=0; i < num_values; i++) offset2 = dissect_spoolss_printer_enum_values( tvb, offset2, pinfo, subtree, di, drep); } offset += size; offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_returned, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } static int SpoolssGetPrinterDriverDirectory_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { guint32 level; /* Parse packet */ offset = dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Name", hf_servername, 0); offset =
dissect_ndr_str_pointer_item( tvb, offset, pinfo, tree, di, drep, NDR_POINTER_UNIQUE, "Environment", hf_environment, 0); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_level, &level); offset = dissect_spoolss_buffer( tvb, offset, pinfo, tree, di, drep, NULL); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_offered, NULL); return offset; } static int SpoolssGetPrinterDriverDirectory_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { /* Parse packet */ offset = dissect_spoolss_string_parm( tvb, offset, pinfo, tree, di, drep, "Directory"); offset = dissect_ndr_uint32( tvb, offset, pinfo, tree, di, drep, hf_needed, NULL); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; } /* * List of subdissectors for this pipe. */ static dcerpc_sub_dissector dcerpc_spoolss_dissectors[] = { { SPOOLSS_ENUMPRINTERS, "EnumPrinters", SpoolssEnumPrinters_q, SpoolssEnumPrinters_r }, { SPOOLSS_OPENPRINTER, "OpenPrinter", NULL, SpoolssGeneric_r }, { SPOOLSS_SETJOB, "SetJob", SpoolssSetJob_q, SpoolssSetJob_r }, { SPOOLSS_GETJOB, "GetJob", SpoolssGetJob_q, SpoolssGetJob_r }, { SPOOLSS_ENUMJOBS, "EnumJobs", SpoolssEnumJobs_q, SpoolssEnumJobs_r }, { SPOOLSS_ADDPRINTER, "AddPrinter", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTER, "DeletePrinter", SpoolssDeletePrinter_q, SpoolssDeletePrinter_r }, { SPOOLSS_SETPRINTER, "SetPrinter", SpoolssSetPrinter_q, SpoolssSetPrinter_r }, { SPOOLSS_GETPRINTER, "GetPrinter", SpoolssGetPrinter_q, SpoolssGetPrinter_r }, { SPOOLSS_ADDPRINTERDRIVER, "AddPrinterDriver", NULL, SpoolssAddPrinterDriver_r }, { SPOOLSS_ENUMPRINTERDRIVERS, "EnumPrinterDrivers", SpoolssEnumPrinterDrivers_q, SpoolssEnumPrinterDrivers_r }, { SPOOLSS_GETPRINTERDRIVER, "GetPrinterDriver", NULL, SpoolssGeneric_r }, { SPOOLSS_GETPRINTERDRIVERDIRECTORY, "GetPrinterDriverDirectory", SpoolssGetPrinterDriverDirectory_q, SpoolssGetPrinterDriverDirectory_r }, { SPOOLSS_DELETEPRINTERDRIVER, "DeletePrinterDriver", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDPRINTPROCESSOR, "AddPrintProcessor", NULL, SpoolssGeneric_r }, { SPOOLSS_ENUMPRINTPROCESSORS, "EnumPrintProcessor", NULL, SpoolssGeneric_r }, { SPOOLSS_GETPRINTPROCESSORDIRECTORY, "GetPrintProcessorDirectory", NULL, SpoolssGeneric_r }, { SPOOLSS_STARTDOCPRINTER, "StartDocPrinter", SpoolssStartDocPrinter_q, SpoolssStartDocPrinter_r }, { SPOOLSS_STARTPAGEPRINTER, "StartPagePrinter", SpoolssStartPagePrinter_q, SpoolssStartPagePrinter_r }, { SPOOLSS_WRITEPRINTER, "WritePrinter", SpoolssWritePrinter_q, SpoolssWritePrinter_r }, { SPOOLSS_ENDPAGEPRINTER, "EndPagePrinter", SpoolssEndPagePrinter_q, SpoolssEndPagePrinter_r }, { SPOOLSS_ABORTPRINTER, "AbortPrinter", NULL, SpoolssGeneric_r }, { SPOOLSS_READPRINTER, "ReadPrinter", NULL, SpoolssGeneric_r }, { SPOOLSS_ENDDOCPRINTER, "EndDocPrinter", SpoolssEndDocPrinter_q, SpoolssEndDocPrinter_r }, { SPOOLSS_ADDJOB, "AddJob", NULL, SpoolssGeneric_r }, { SPOOLSS_SCHEDULEJOB, "ScheduleJob", NULL, SpoolssGeneric_r }, { SPOOLSS_GETPRINTERDATA, "GetPrinterData", SpoolssGetPrinterData_q, SpoolssGetPrinterData_r }, { SPOOLSS_SETPRINTERDATA, "SetPrinterData", SpoolssSetPrinterData_q, SpoolssSetPrinterData_r }, { SPOOLSS_WAITFORPRINTERCHANGE, "WaitForPrinterChange", NULL, SpoolssGeneric_r }, { SPOOLSS_CLOSEPRINTER, "ClosePrinter", SpoolssClosePrinter_q, SpoolssClosePrinter_r }, { SPOOLSS_ADDFORM, "AddForm", SpoolssAddForm_q, SpoolssAddForm_r }, { SPOOLSS_DELETEFORM, "DeleteForm", SpoolssDeleteForm_q, SpoolssDeleteForm_r 
}, { SPOOLSS_GETFORM, "GetForm", SpoolssGetForm_q, SpoolssGetForm_r }, { SPOOLSS_SETFORM, "SetForm", SpoolssSetForm_q, SpoolssSetForm_r }, { SPOOLSS_ENUMFORMS, "EnumForms", SpoolssEnumForms_q, SpoolssEnumForms_r }, { SPOOLSS_ENUMPORTS, "EnumPorts", NULL, SpoolssGeneric_r }, { SPOOLSS_ENUMMONITORS, "EnumMonitors", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDPORT, "AddPort", NULL, SpoolssGeneric_r }, { SPOOLSS_CONFIGUREPORT, "ConfigurePort", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPORT, "DeletePort", NULL, SpoolssGeneric_r }, { SPOOLSS_CREATEPRINTERIC, "CreatePrinterIC", NULL, SpoolssGeneric_r }, { SPOOLSS_PLAYGDISCRIPTONPRINTERIC, "PlayDiscriptOnPrinterIC", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTERIC, "DeletePrinterIC", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDPRINTERCONNECTION, "AddPrinterConnection", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTERCONNECTION, "DeletePrinterConnection", NULL, SpoolssGeneric_r }, { SPOOLSS_PRINTERMESSAGEBOX, "PrinterMessageBox", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDMONITOR, "AddMonitor", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEMONITOR, "DeleteMonitor", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTPROCESSOR, "DeletePrintProcessor", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDPRINTPROVIDER, "AddPrintProvider", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTPROVIDER, "DeletePrintProvider", NULL, SpoolssGeneric_r }, { SPOOLSS_ENUMPRINTPROCDATATYPES, "EnumPrintProcDataTypes", NULL, SpoolssGeneric_r }, { SPOOLSS_RESETPRINTER, "ResetPrinter", NULL, SpoolssGeneric_r }, { SPOOLSS_GETPRINTERDRIVER2, "GetPrinterDriver2", SpoolssGetPrinterDriver2_q, SpoolssGetPrinterDriver2_r }, { SPOOLSS_FINDFIRSTPRINTERCHANGENOTIFICATION, "FindFirstPrinterChangeNotification", NULL, SpoolssGeneric_r }, { SPOOLSS_FINDNEXTPRINTERCHANGENOTIFICATION, "FindNextPrinterChangeNotification", NULL, SpoolssGeneric_r }, { SPOOLSS_FCPN, "FCPN", SpoolssFCPN_q, SpoolssFCPN_r }, { SPOOLSS_ROUTERFINDFIRSTPRINTERNOTIFICATIONOLD, "RouterFindFirstPrinterNotificationOld", NULL, SpoolssGeneric_r }, { SPOOLSS_REPLYOPENPRINTER, "ReplyOpenPrinter", SpoolssReplyOpenPrinter_q, SpoolssReplyOpenPrinter_r }, { SPOOLSS_ROUTERREPLYPRINTER, "RouterReplyPrinter", SpoolssRouterReplyPrinter_q, SpoolssRouterReplyPrinter_r }, { SPOOLSS_REPLYCLOSEPRINTER, "ReplyClosePrinter", SpoolssReplyClosePrinter_q, SpoolssReplyClosePrinter_r }, { SPOOLSS_ADDPORTEX, "AddPortEx", NULL, SpoolssGeneric_r }, { SPOOLSS_REMOTEFINDFIRSTPRINTERCHANGENOTIFICATION, "RemoteFindFirstPrinterChangeNotification", NULL, SpoolssGeneric_r }, { SPOOLSS_SPOOLERINIT, "SpoolerInit", NULL, SpoolssGeneric_r }, { SPOOLSS_RESETPRINTEREX, "ResetPrinterEx", NULL, SpoolssGeneric_r }, { SPOOLSS_RFFPCNEX, "RFFPCNEX", SpoolssRFFPCNEX_q, SpoolssRFFPCNEX_r }, { SPOOLSS_RRPCN, "RRPCN", SpoolssRRPCN_q, SpoolssRRPCN_r }, { SPOOLSS_RFNPCNEX, "RFNPCNEX", SpoolssRFNPCNEX_q, SpoolssRFNPCNEX_r }, { SPOOLSS_OPENPRINTEREX, "OpenPrinterEx", SpoolssOpenPrinterEx_q, SpoolssOpenPrinterEx_r }, { SPOOLSS_ADDPRINTEREX, "AddPrinterEx", NULL, SpoolssAddPrinterEx_r }, { SPOOLSS_ENUMPRINTERDATA, "EnumPrinterData", SpoolssEnumPrinterData_q, SpoolssEnumPrinterData_r }, { SPOOLSS_DELETEPRINTERDATA, "DeletePrinterData", SpoolssDeletePrinterData_q, SpoolssDeletePrinterData_r }, { SPOOLSS_GETPRINTERDATAEX, "GetPrinterDataEx", SpoolssGetPrinterDataEx_q, SpoolssGetPrinterDataEx_r }, { SPOOLSS_SETPRINTERDATAEX, "SetPrinterDataEx", SpoolssSetPrinterDataEx_q, SpoolssSetPrinterDataEx_r }, { SPOOLSS_ENUMPRINTERDATAEX, "EnumPrinterDataEx", SpoolssEnumPrinterDataEx_q, SpoolssEnumPrinterDataEx_r }, { 
SPOOLSS_ENUMPRINTERKEY, "EnumPrinterKey", SpoolssEnumPrinterKey_q, SpoolssEnumPrinterKey_r }, { SPOOLSS_DELETEPRINTERDATAEX, "DeletePrinterDataEx", NULL, SpoolssGeneric_r }, { SPOOLSS_DELETEPRINTERDRIVEREX, "DeletePrinterDriverEx", NULL, SpoolssGeneric_r }, { SPOOLSS_ADDPRINTERDRIVEREX, "AddPrinterDriverEx", NULL, SpoolssGeneric_r }, { 0, NULL, NULL, NULL }, }; /* * Dissector initialisation function */ /* Protocol registration */ static int proto_dcerpc_spoolss = -1; static gint ett_dcerpc_spoolss = -1; void proto_register_dcerpc_spoolss(void) { static hf_register_info hf[] = { /* GetPrinterDriver2 */ { &hf_clientmajorversion, { "Client major version", "spoolss.clientmajorversion", FT_UINT32, BASE_DEC, NULL, 0x0, "Client printer driver major version", HFILL }}, { &hf_clientminorversion, { "Client minor version", "spoolss.clientminorversion", FT_UINT32, BASE_DEC, NULL, 0x0, "Client printer driver minor version", HFILL }}, { &hf_servermajorversion, { "Server major version", "spoolss.servermajorversion", FT_UINT32, BASE_DEC, NULL, 0x0, "Server printer driver major version", HFILL }}, { &hf_serverminorversion, { "Server minor version", "spoolss.serverminorversion", FT_UINT32, BASE_DEC, NULL, 0x0, "Server printer driver minor version", HFILL }}, { &hf_driverpath, { "Driver path", "spoolss.driverpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_datafile, { "Data file", "spoolss.datafile", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_configfile, { "Config file", "spoolss.configfile", FT_STRING, BASE_NONE, NULL, 0, "Printer name", HFILL }}, { &hf_helpfile, { "Help file", "spoolss.helpfile", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_monitorname, { "Monitor name", "spoolss.monitorname", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_defaultdatatype, { "Default data type", "spoolss.defaultdatatype", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_driverinfo_cversion, { "Driver version", "spoolss.driverversion", FT_UINT32, BASE_DEC, VALS(driverinfo_cversion_vals), 0, "Printer name", HFILL }}, { &hf_dependentfiles, { "Dependent files", "spoolss.dependentfiles", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printer_status, { "Status", "spoolss.printer_status", FT_UINT32, BASE_DEC|BASE_EXT_STRING, &printer_status_vals_ext, 0, NULL, HFILL }}, { &hf_previousdrivernames, { "Previous Driver Names", "spoolss.previousdrivernames", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_driverdate, { "Driver Date", "spoolss.driverdate", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0, "Date of driver creation", HFILL }}, { &hf_padding, { "Padding", "spoolss.padding", FT_UINT32, BASE_HEX, NULL, 0, "Some padding - conveys no semantic information", HFILL }}, { &hf_driver_version_low, { "Minor Driver Version", "spoolss.minordriverversion", FT_UINT32, BASE_DEC, NULL, 0, "Driver Version Low", HFILL }}, { &hf_driver_version_high, { "Major Driver Version", "spoolss.majordriverversion", FT_UINT32, BASE_DEC, NULL, 0, "Driver Version High", HFILL }}, { &hf_mfgname, { "Mfgname", "spoolss.mfgname", FT_STRING, BASE_NONE, NULL, 0, "Manufacturer Name", HFILL }}, { &hf_oemurl, { "OEM URL", "spoolss.oemrul", FT_STRING, BASE_NONE, NULL, 0, "OEM URL - Website of Vendor", HFILL }}, { &hf_hardwareid, { "Hardware ID", "spoolss.hardwareid", FT_STRING, BASE_NONE, NULL, 0, "Hardware Identification Information", HFILL }}, { &hf_provider, { "Provider", "spoolss.provider", FT_STRING, BASE_NONE, NULL, 0, "Provider of Driver", HFILL }}, /* Setprinter RPC */ { &hf_setprinter_cmd, { "Command", 
"spoolss.setprinter_cmd", FT_UINT32, BASE_DEC, VALS(setprinter_cmd_vals), 0, NULL, HFILL }}, /* Enumprinters */ { &hf_enumprinters_flags, { "Flags", "spoolss.enumprinters.flags", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_enumprinters_flags_local, { "Enum local", "spoolss.enumprinters.flags.enum_local", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_LOCAL, NULL, HFILL }}, { &hf_enumprinters_flags_name, { "Enum name", "spoolss.enumprinters.flags.enum_name", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_NAME, NULL, HFILL }}, { &hf_enumprinters_flags_shared, { "Enum shared", "spoolss.enumprinters.flags.enum_shared", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_SHARED, NULL, HFILL }}, { &hf_enumprinters_flags_default, { "Enum default", "spoolss.enumprinters.flags.enum_default", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_DEFAULT, NULL, HFILL }}, { &hf_enumprinters_flags_connections, { "Enum connections", "spoolss.enumprinters.flags.enum_connections", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_CONNECTIONS, NULL, HFILL }}, { &hf_enumprinters_flags_network, { "Enum network", "spoolss.enumprinters.flags.enum_network", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_NETWORK, NULL, HFILL }}, { &hf_enumprinters_flags_remote, { "Enum remote", "spoolss.enumprinters.flags.enum_remote", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ENUM_REMOTE, NULL, HFILL }}, /* GetPrinter */ { &hf_start_time, { "Start time", "spoolss.start_time", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_end_time, { "End time", "spoolss.end_time", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_elapsed_time, { "Elapsed time", "spoolss.elapsed_time", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, /* * New hf index values */ { &hf_opnum, { "Operation", "spoolss.opnum", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_hnd, { "Context handle", "spoolss.hnd", FT_BYTES, BASE_NONE, NULL, 0x0, "SPOOLSS policy handle", HFILL }}, { &hf_rc, { "Return code", "spoolss.rc", FT_UINT32, BASE_HEX | BASE_EXT_STRING, &DOS_errors_ext, 0x0, "SPOOLSS return code", HFILL }}, { &hf_offered, { "Offered", "spoolss.offered", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of buffer offered in this request", HFILL }}, { &hf_needed, { "Needed", "spoolss.needed", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of buffer required for request", HFILL }}, { &hf_returned, { "Returned", "spoolss.returned", FT_UINT32, BASE_DEC, NULL, 0x0, "Number of items returned", HFILL }}, { &hf_buffer_size, { "Buffer size", "spoolss.buffer.size", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of buffer", HFILL }}, { &hf_buffer_data, { "Buffer data", "spoolss.buffer.data", FT_BYTES, BASE_NONE, NULL, 0x0, "Contents of buffer", HFILL }}, { &hf_string_parm_size, { "String buffer size", "spoolss.string.buffersize", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of string buffer", HFILL }}, { &hf_string_parm_data, { "String data", "spoolss.string.data", FT_STRINGZ, BASE_NONE, NULL, 0x0, "Contents of string", HFILL }}, { &hf_offset, { "Offset", "spoolss.offset", FT_UINT32, BASE_DEC, NULL, 0x0, "Offset of data", HFILL }}, { &hf_level, { "Info level", "spoolss.enumjobs.level", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_printername, { "Printer name", "spoolss.printername", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_machinename, { "Machine name", "spoolss.machinename", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_notifyname, { "Notify name", "spoolss.notifyname", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printerdesc, { "Printer 
description", "spoolss.printerdesc", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printercomment, { "Printer comment", "spoolss.printercomment", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_servername, { "Server name", "spoolss.servername", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_sharename, { "Share name", "spoolss.sharename", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_portname, { "Port name", "spoolss.portname", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printerlocation, { "Printer location", "spoolss.printerlocation", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_environment, { "Environment name", "spoolss.environment", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_drivername, { "Driver name", "spoolss.drivername", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_username, { "User name", "spoolss.username", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_documentname, { "Document name", "spoolss.document", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_outputfile, { "Output file", "spoolss.outputfile", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_datatype, { "Datatype", "spoolss.datatype", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_textstatus, { "Text status", "spoolss.textstatus", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_sepfile, { "Separator file", "spoolss.setpfile", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_parameters, { "Parameters", "spoolss.parameters", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printprocessor, { "Print processor", "spoolss.printprocessor", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, /* Printer data */ { &hf_printerdata, { "Data", "spoolss.printerdata", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_printerdata_key, { "Key", "spoolss.printerdata.key", FT_STRING, BASE_NONE, NULL, 0, "Printer data key", HFILL }}, { &hf_printerdata_value, { "Value", "spoolss.printerdata.value", FT_STRING, BASE_NONE, NULL, 0, "Printer data value", HFILL }}, { &hf_printerdata_type, { "Type", "spoolss.printerdata.type", FT_UINT32, BASE_DEC|BASE_EXT_STRING, &reg_datatypes_ext, 0, "Printer data type", HFILL }}, { &hf_printerdata_size, { "Size", "spoolss.printerdata.size", FT_UINT32, BASE_DEC, NULL, 0, "Printer data size", HFILL }}, { &hf_printerdata_data, { "Data", "spoolss.printerdata.data", FT_BYTES, BASE_NONE, NULL, 0x0, "Printer data", HFILL }}, { &hf_printerdata_data_dword, { "DWORD data", "spoolss.printerdata.data.dword", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_printerdata_data_sz, { "String data", "spoolss.printerdata.data.sz", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, /* Devicemode */ { &hf_devmodectr_size, { "Devicemode ctr size", "spoolss.devicemodectr.size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode, { "Devicemode", "spoolss.devmode", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_devmode_size, { "Size", "spoolss.devmode.size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_spec_version, { "Spec version", "spoolss.devmode.spec_version", FT_UINT16, BASE_DEC, VALS(devmode_specversion_vals), 0, NULL, HFILL }}, { &hf_devmode_driver_version, { "Driver version", "spoolss.devmode.driver_version", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_size2, { "Size2", "spoolss.devmode.size2", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_fields, { "Fields", "spoolss.devmode.fields", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_devmode_orientation, { "Orientation", 
"spoolss.devmode.orientation", FT_UINT16, BASE_DEC, VALS(devmode_orientation_vals), 0, NULL, HFILL }}, { &hf_devmode_paper_size, { "Paper size", "spoolss.devmode.paper_size", FT_UINT16, BASE_DEC|BASE_EXT_STRING, &devmode_papersize_vals_ext, 0, NULL, HFILL }}, { &hf_devmode_paper_width, { "Paper width", "spoolss.devmode.paper_width", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_paper_length, { "Paper length", "spoolss.devmode.paper_length", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_scale, { "Scale", "spoolss.devmode.scale", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_copies, { "Copies", "spoolss.devmode.copies", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_default_source, { "Default source", "spoolss.devmode.default_source", FT_UINT16, BASE_DEC|BASE_EXT_STRING, &devmode_papersource_vals_ext, 0, NULL, HFILL }}, { &hf_devmode_print_quality, { "Print quality", "spoolss.devmode.print_quality", FT_UINT16, BASE_DEC, VALS(devmode_printquality_vals), 0, NULL, HFILL }}, { &hf_devmode_color, { "Color", "spoolss.devmode.color", FT_UINT16, BASE_DEC, VALS(devmode_colour_vals), 0, NULL, HFILL }}, { &hf_devmode_duplex, { "Duplex", "spoolss.devmode.duplex", FT_UINT16, BASE_DEC, VALS(devmode_duplex_vals), 0, NULL, HFILL }}, { &hf_devmode_y_resolution, { "Y resolution", "spoolss.devmode.y_resolution", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_tt_option, { "TT option", "spoolss.devmode.tt_option", FT_UINT16, BASE_DEC, VALS(devmode_ttoption_vals), 0, NULL, HFILL }}, { &hf_devmode_collate, { "Collate", "spoolss.devmode.collate", FT_UINT16, BASE_DEC, VALS(devmode_collate_vals), 0, NULL, HFILL }}, { &hf_devmode_log_pixels, { "Log pixels", "spoolss.devmode.log_pixels", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_bits_per_pel, { "Bits per pel", "spoolss.devmode.bits_per_pel", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_pels_width, { "Pels width", "spoolss.devmode.pels_width", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_pels_height, { "Pels height", "spoolss.devmode.pels_height", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_display_flags, { "Display flags", "spoolss.devmode.display_flags", FT_UINT32, BASE_DEC, VALS(devmode_displayflags_vals), 0, NULL, HFILL }}, { &hf_devmode_display_freq, { "Display frequency", "spoolss.devmode.display_freq", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_icm_method, { "ICM method", "spoolss.devmode.icm_method", FT_UINT32, BASE_DEC, VALS(devmode_icmmethod_vals), 0, NULL, HFILL }}, { &hf_devmode_icm_intent, { "ICM intent", "spoolss.devmode.icm_intent", FT_UINT32, BASE_DEC, VALS(devmode_icmintent_vals), 0, NULL, HFILL }}, { &hf_devmode_media_type, { "Media type", "spoolss.devmode.media_type", FT_UINT32, BASE_DEC, VALS(devmode_mediatype_vals), 0, NULL, HFILL }}, { &hf_devmode_dither_type, { "Dither type", "spoolss.devmode.dither_type", FT_UINT32, BASE_DEC, VALS(devmode_dithertype_vals), 0, NULL, HFILL }}, { &hf_devmode_reserved1, { "Reserved1", "spoolss.devmode.reserved1", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_reserved2, { "Reserved2", "spoolss.devmode.reserved2", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_panning_width, { "Panning width", "spoolss.devmode.panning_width", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_panning_height, { "Panning height", "spoolss.devmode.panning_height", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_driver_extra_len, { "Driver extra 
length", "spoolss.devmode.driver_extra_len", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_devmode_driver_extra, { "Driver extra", "spoolss.devmode.driver_extra", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL }}, /* Devicemode fields */ { &hf_devmode_fields_orientation, { "Orientation", "spoolss.devmode.fields.orientation", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_ORIENTATION, NULL, HFILL }}, { &hf_devmode_fields_papersize, { "Paper size", "spoolss.devmode.fields.paper_size", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PAPERSIZE, NULL, HFILL }}, { &hf_devmode_fields_paperlength, { "Paper length", "spoolss.devmode.fields.paper_length", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PAPERLENGTH, NULL, HFILL }}, { &hf_devmode_fields_paperwidth, { "Paper width", "spoolss.devmode.fields.paper_width", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PAPERWIDTH, NULL, HFILL }}, { &hf_devmode_fields_scale, { "Scale", "spoolss.devmode.fields.scale", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_SCALE, NULL, HFILL }}, { &hf_devmode_fields_position, { "Position", "spoolss.devmode.fields.position", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_POSITION, NULL, HFILL }}, { &hf_devmode_fields_nup, { "N-up", "spoolss.devmode.fields.nup", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_NUP, NULL, HFILL }}, { &hf_devmode_fields_copies, { "Copies", "spoolss.devmode.fields.copies", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_COPIES, NULL, HFILL }}, { &hf_devmode_fields_defaultsource, { "Default source", "spoolss.devmode.fields.default_source", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_DEFAULTSOURCE, NULL, HFILL }}, { &hf_devmode_fields_printquality, { "Print quality", "spoolss.devmode.fields.print_quality", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PRINTQUALITY, NULL, HFILL }}, { &hf_devmode_fields_color, { "Color", "spoolss.devmode.fields.color", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_COLOR, NULL, HFILL }}, { &hf_devmode_fields_duplex, { "Duplex", "spoolss.devmode.fields.duplex", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_DUPLEX, NULL, HFILL }}, { &hf_devmode_fields_yresolution, { "Y resolution", "spoolss.devmode.fields.y_resolution", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_YRESOLUTION, NULL, HFILL }}, { &hf_devmode_fields_ttoption, { "TT option", "spoolss.devmode.fields.tt_option", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_TTOPTION, NULL, HFILL }}, { &hf_devmode_fields_collate, { "Collate", "spoolss.devmode.fields.collate", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_COLLATE, NULL, HFILL }}, { &hf_devmode_fields_formname, { "Form name", "spoolss.devmode.fields.form_name", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_FORMNAME, NULL, HFILL }}, { &hf_devmode_fields_logpixels, { "Log pixels", "spoolss.devmode.fields.log_pixels", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_LOGPIXELS, NULL, HFILL }}, { &hf_devmode_fields_bitsperpel, { "Bits per pel", "spoolss.devmode.fields.bits_per_pel", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_BITSPERPEL, NULL, HFILL }}, { &hf_devmode_fields_pelswidth, { "Pels width", "spoolss.devmode.fields.pels_width", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PELSWIDTH, NULL, HFILL }}, { &hf_devmode_fields_pelsheight, { "Pels height", "spoolss.devmode.fields.pels_height", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PELSHEIGHT, NULL, HFILL }}, { &hf_devmode_fields_displayflags, { "Display flags", "spoolss.devmode.fields.display_flags", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_DISPLAYFLAGS, NULL, HFILL }}, { 
&hf_devmode_fields_displayfrequency, { "Display frequency", "spoolss.devmode.fields.display_frequency", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_DISPLAYFREQUENCY, NULL, HFILL }}, { &hf_devmode_fields_icmmethod, { "ICM method", "spoolss.devmode.fields.icm_method", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_ICMMETHOD, NULL, HFILL }}, { &hf_devmode_fields_icmintent, { "ICM intent", "spoolss.devmode.fields.icm_intent", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_ICMINTENT, NULL, HFILL }}, { &hf_devmode_fields_mediatype, { "Media type", "spoolss.devmode.fields.media_type", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_MEDIATYPE, NULL, HFILL }}, { &hf_devmode_fields_dithertype, { "Dither type", "spoolss.devmode.fields.dither_type", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_DITHERTYPE, NULL, HFILL }}, { &hf_devmode_fields_panningwidth, { "Panning width", "spoolss.devmode.fields.panning_width", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PANNINGWIDTH, NULL, HFILL }}, { &hf_devmode_fields_panningheight, { "Panning height", "spoolss.devmode.fields.panning_height", FT_BOOLEAN, 32, TFS(&tfs_set_notset), DEVMODE_PANNINGHEIGHT, NULL, HFILL }}, /* EnumPrinterData RPC */ { &hf_enumprinterdata_enumindex, { "Enum index", "spoolss.enumprinterdata.enumindex", FT_UINT32, BASE_DEC, NULL, 0x0, "Index for start of enumeration", HFILL }}, { &hf_enumprinterdata_value_offered, { "Value size offered", "spoolss.enumprinterdata.value_offered", FT_UINT32, BASE_DEC, NULL, 0x0, "Buffer size offered for printerdata value", HFILL }}, { &hf_enumprinterdata_data_offered, { "Data size offered", "spoolss.enumprinterdata.data_offered", FT_UINT32, BASE_DEC, NULL, 0x0, "Buffer size offered for printerdata data", HFILL }}, { &hf_enumprinterdata_value_len, { "Value length", "spoolss.enumprinterdata.value_len", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of printerdata value", HFILL }}, { &hf_enumprinterdata_value_needed, { "Value size needed", "spoolss.enumprinterdata.value_needed", FT_UINT32, BASE_DEC, NULL, 0x0, "Buffer size needed for printerdata value", HFILL }}, { &hf_enumprinterdata_data_needed, { "Data size needed", "spoolss.enumprinterdata.data_needed", FT_UINT32, BASE_DEC, NULL, 0x0, "Buffer size needed for printerdata data", HFILL }}, /* Print jobs */ { &hf_job_id, { "Job ID", "spoolss.job.id", FT_UINT32, BASE_DEC, NULL, 0x0, "Job identification number", HFILL }}, { &hf_job_status, { "Job status", "spoolss.job.status", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_status_paused, { "Paused", "spoolss.job.status.paused", FT_BOOLEAN, 32, TFS(&tfs_job_status_paused), JOB_STATUS_PAUSED, NULL, HFILL }}, { &hf_job_status_error, { "Error", "spoolss.job.status.error", FT_BOOLEAN, 32, TFS(&tfs_job_status_error), JOB_STATUS_ERROR, NULL, HFILL }}, { &hf_job_status_deleting, { "Deleting", "spoolss.job.status.deleting", FT_BOOLEAN, 32, TFS(&tfs_job_status_deleting), JOB_STATUS_DELETING, NULL, HFILL }}, { &hf_job_status_spooling, { "Spooling", "spoolss.job.status.spooling", FT_BOOLEAN, 32, TFS(&tfs_job_status_spooling), JOB_STATUS_SPOOLING, NULL, HFILL }}, { &hf_job_status_printing, { "Printing", "spoolss.job.status.printing", FT_BOOLEAN, 32, TFS(&tfs_job_status_printing), JOB_STATUS_PRINTING, NULL, HFILL }}, { &hf_job_status_offline, { "Offline", "spoolss.job.status.offline", FT_BOOLEAN, 32, TFS(&tfs_job_status_offline), JOB_STATUS_OFFLINE, NULL, HFILL }}, { &hf_job_status_paperout, { "Paperout", "spoolss.job.status.paperout", FT_BOOLEAN, 32, TFS(&tfs_job_status_paperout), JOB_STATUS_PAPEROUT, NULL, HFILL }}, { 
&hf_job_status_printed, { "Printed", "spoolss.job.status.printed", FT_BOOLEAN, 32, TFS(&tfs_job_status_printed), JOB_STATUS_PRINTED, NULL, HFILL }}, { &hf_job_status_deleted, { "Deleted", "spoolss.job.status.deleted", FT_BOOLEAN, 32, TFS(&tfs_job_status_deleted), JOB_STATUS_DELETED, NULL, HFILL }}, { &hf_job_status_blocked, { "Blocked", "spoolss.job.status.blocked", FT_BOOLEAN, 32, TFS(&tfs_job_status_blocked), JOB_STATUS_BLOCKED, NULL, HFILL }}, { &hf_job_status_user_intervention, { "User intervention", "spoolss.job.status.user_intervention", FT_BOOLEAN, 32, TFS(&tfs_job_status_user_intervention), JOB_STATUS_USER_INTERVENTION, NULL, HFILL }}, { &hf_job_priority, { "Job priority", "spoolss.job.priority", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_position, { "Job position", "spoolss.job.position", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_totalpages, { "Job total pages", "spoolss.job.totalpages", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_totalbytes, { "Job total bytes", "spoolss.job.totalbytes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_bytesprinted, { "Job bytes printed", "spoolss.job.bytesprinted", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_pagesprinted, { "Job pages printed", "spoolss.job.pagesprinted", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_job_size, { "Job size", "spoolss.job.size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, /* Forms */ { &hf_form, { "Data", "spoolss.form", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_form_level, { "Level", "spoolss.form.level", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_form_name, { "Name", "spoolss.form.name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_form_flags, { "Flags", "spoolss.form.flags", FT_UINT32, BASE_DEC, VALS(form_type_vals), 0, NULL, HFILL }}, { &hf_form_unknown, { "Unknown", "spoolss.form.unknown", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_form_width, { "Width", "spoolss.form.width", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_form_height, { "Height", "spoolss.form.height", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_form_left_margin, { "Left margin", "spoolss.form.left", FT_UINT32, BASE_DEC, NULL, 0, "Left", HFILL }}, { &hf_form_top_margin, { "Top", "spoolss.form.top", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_form_horiz_len, { "Horizontal", "spoolss.form.horiz", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_form_vert_len, { "Vertical", "spoolss.form.vert", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_enumforms_num, { "Num", "spoolss.enumforms.num", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, /* Print notify */ { &hf_notify_options_version, { "Version", "spoolss.notify_options.version", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_options_flags, { "Flags", "spoolss.notify_options.flags", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_options_count, { "Count", "spoolss.notify_options.count", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_option_type, { "Type", "spoolss.notify_option.type", FT_UINT16, BASE_DEC, VALS(printer_notify_types), 0, NULL, HFILL }}, { &hf_notify_option_reserved1, { "Reserved1", "spoolss.notify_option.reserved1", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_option_reserved2, { "Reserved2", "spoolss.notify_option.reserved2", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_option_reserved3, { "Reserved3", "spoolss.notify_option.reserved3", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL 
}}, { &hf_notify_option_count, { "Count", "spoolss.notify_option.count", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_option_data_count, { "Count", "spoolss.notify_option_data.count", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_options_flags_refresh, { "Refresh", "spoolss.notify_options.flags.refresh", FT_BOOLEAN, 32, TFS(&tfs_notify_options_flags_refresh), PRINTER_NOTIFY_OPTIONS_REFRESH, NULL, HFILL }}, { &hf_notify_info_count, { "Count", "spoolss.notify_info.count", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_version, { "Version", "spoolss.notify_info.version", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_flags, { "Flags", "spoolss.notify_info.flags", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_type, { "Type", "spoolss.notify_info_data.type", FT_UINT16, BASE_DEC, VALS(printer_notify_types), 0, NULL, HFILL }}, { &hf_notify_field, { "Field", "spoolss.notify_field", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_count, { "Count", "spoolss.notify_info_data.count", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_id, { "Job Id", "spoolss.notify_info_data.jobid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_value1, { "Value1", "spoolss.notify_info_data.value1", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_value2, { "Value2", "spoolss.notify_info_data.value2", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_bufsize, { "Buffer size", "spoolss.notify_info_data.bufsize", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_buffer, { "Buffer", "spoolss.notify_info_data.buffer", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_buffer_len, { "Buffer length", "spoolss.notify_info_data.buffer.len", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_notify_info_data_buffer_data, { "Buffer data", "spoolss.notify_info_data.buffer.data", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL }}, /* RffpCNex RPC */ { &hf_rffpcnex_options, { "Options", "spoolss.rffpcnex.options", FT_UINT32, BASE_DEC, NULL, 0, "RFFPCNEX options", HFILL }}, { &hf_printerlocal, /* XXX: move me */ { "Printer local", "spoolss.printer_local", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_rffpcnex_flags, { "RFFPCNEX flags", "spoolss.rffpcnex.flags", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_rffpcnex_flags_add_printer, { "Add printer", "spoolss.rffpcnex.flags.add_printer", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_printer), SPOOLSS_PRINTER_CHANGE_ADD_PRINTER, NULL, HFILL }}, { &hf_rffpcnex_flags_set_printer, { "Set printer", "spoolss.rffpcnex.flags.set_printer", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_set_printer), SPOOLSS_PRINTER_CHANGE_SET_PRINTER, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_printer, { "Delete printer", "spoolss.rffpcnex.flags.delete_printer", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_printer), SPOOLSS_PRINTER_CHANGE_DELETE_PRINTER, NULL, HFILL }}, { &hf_rffpcnex_flags_add_job, { "Add job", "spoolss.rffpcnex.flags.add_job", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_job), SPOOLSS_PRINTER_CHANGE_ADD_JOB, NULL, HFILL }}, { &hf_rffpcnex_flags_set_job, { "Set job", "spoolss.rffpcnex.flags.set_job", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_set_job), SPOOLSS_PRINTER_CHANGE_SET_JOB, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_job, { "Delete job", "spoolss.rffpcnex.flags.delete_job", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_job), SPOOLSS_PRINTER_CHANGE_DELETE_JOB, NULL, 
HFILL }}, { &hf_rffpcnex_flags_write_job, { "Write job", "spoolss.rffpcnex.flags.write_job", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_write_job), SPOOLSS_PRINTER_CHANGE_WRITE_JOB, NULL, HFILL }}, { &hf_rffpcnex_flags_add_form, { "Add form", "spoolss.rffpcnex.flags.add_form", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_form), SPOOLSS_PRINTER_CHANGE_ADD_FORM, NULL, HFILL }}, { &hf_rffpcnex_flags_set_form, { "Set form", "spoolss.rffpcnex.flags.set_form", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_set_form), SPOOLSS_PRINTER_CHANGE_SET_FORM, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_form, { "Delete form", "spoolss.rffpcnex.flags.delete_form", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_form), SPOOLSS_PRINTER_CHANGE_DELETE_FORM, NULL, HFILL }}, { &hf_rffpcnex_flags_add_port, { "Add port", "spoolss.rffpcnex.flags.add_port", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_port), SPOOLSS_PRINTER_CHANGE_ADD_PORT, NULL, HFILL }}, { &hf_rffpcnex_flags_configure_port, { "Configure port", "spoolss.rffpcnex.flags.configure_port", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_configure_port), SPOOLSS_PRINTER_CHANGE_CONFIGURE_PORT, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_port, { "Delete port", "spoolss.rffpcnex.flags.delete_port", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_port), SPOOLSS_PRINTER_CHANGE_DELETE_PORT, NULL, HFILL }}, { &hf_rffpcnex_flags_add_print_processor, { "Add processor", "spoolss.rffpcnex.flags.add_processor", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_print_processor), SPOOLSS_PRINTER_CHANGE_ADD_PRINT_PROCESSOR, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_print_processor, { "Delete processor", "spoolss.rffpcnex.flags.delete_processor", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_print_processor), SPOOLSS_PRINTER_CHANGE_DELETE_PRINT_PROCESSOR, NULL, HFILL }}, { &hf_rffpcnex_flags_add_driver, { "Add driver", "spoolss.rffpcnex.flags.add_driver", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_add_driver), SPOOLSS_PRINTER_CHANGE_ADD_PRINTER_DRIVER, NULL, HFILL }}, { &hf_rffpcnex_flags_set_driver, { "Set driver", "spoolss.rffpcnex.flags.set_driver", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_set_driver), SPOOLSS_PRINTER_CHANGE_SET_PRINTER_DRIVER, NULL, HFILL }}, { &hf_rffpcnex_flags_delete_driver, { "Delete driver", "spoolss.rffpcnex.flags.delete_driver", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_delete_driver), SPOOLSS_PRINTER_CHANGE_DELETE_PRINTER_DRIVER, NULL, HFILL }}, { &hf_rffpcnex_flags_timeout, { "Timeout", "spoolss.rffpcnex.flags.timeout", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_timeout), SPOOLSS_PRINTER_CHANGE_TIMEOUT, NULL, HFILL }}, { &hf_rffpcnex_flags_failed_printer_connection, { "Failed printer connection", "spoolss.rffpcnex.flags.failed_connection_printer", FT_BOOLEAN, 32, TFS(&tfs_rffpcnex_flags_failed_connection_printer), SPOOLSS_PRINTER_CHANGE_FAILED_CONNECTION_PRINTER, NULL, HFILL }}, /* RRPCN RPC */ { &hf_rrpcn_changelow, { "Change low", "spoolss.rrpcn.changelow", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_rrpcn_changehigh, { "Change high", "spoolss.rrpcn.changehigh", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_rrpcn_unk0, { "Unknown 0", "spoolss.rrpcn.unk0", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_rrpcn_unk1, { "Unknown 1", "spoolss.rrpcn.unk1", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, /* ReplyOpenPrinter RPC */ { &hf_replyopenprinter_unk0, { "Unknown 0", "spoolss.replyopenprinter.unk0", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_replyopenprinter_unk1, { "Unknown 1", "spoolss.replyopenprinter.unk1", FT_UINT32, BASE_DEC, NULL, 0, NULL, 
HFILL }}, { &hf_devmode_devicename, { "DeviceName", "spoolss.devmode.devicename", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_devmode_form_name, { "FormName", "spoolss.devmode.form_name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_relative_string, { "String", "spoolss.relative_string", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_value_name, { "Value Name", "spoolss.value_name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_keybuffer, { "Key", "spoolss.hf_keybuffer", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_value_string, { "Value", "spoolss.value_string", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, /* Printer attributes */ { &hf_printer_attributes, { "Attributes", "spoolss.printer_attributes", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_printer_attributes_queued, { "Queued", "spoolss.printer_attributes.queued", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_queued), PRINTER_ATTRIBUTE_QUEUED, NULL, HFILL }}, { &hf_printer_attributes_direct, { "Direct", "spoolss.printer_attributes.direct", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_direct), PRINTER_ATTRIBUTE_DIRECT, NULL, HFILL }}, { &hf_printer_attributes_default, { "Default (9x/ME only)", "spoolss.printer_attributes.default",FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_default), PRINTER_ATTRIBUTE_DEFAULT, "Default", HFILL }}, { &hf_printer_attributes_shared, { "Shared", "spoolss.printer_attributes.shared", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_shared), PRINTER_ATTRIBUTE_SHARED, NULL, HFILL }}, { &hf_printer_attributes_network, { "Network", "spoolss.printer_attributes.network", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_network), PRINTER_ATTRIBUTE_NETWORK, NULL, HFILL }}, { &hf_printer_attributes_hidden, { "Hidden", "spoolss.printer_attributes.hidden", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_hidden), PRINTER_ATTRIBUTE_HIDDEN, NULL, HFILL }}, { &hf_printer_attributes_local, { "Local", "spoolss.printer_attributes.local", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_local), PRINTER_ATTRIBUTE_LOCAL, NULL, HFILL }}, { &hf_printer_attributes_enable_devq, { "Enable devq", "spoolss.printer_attributes.enable_devq", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_enable_devq), PRINTER_ATTRIBUTE_ENABLE_DEVQ, "Enable evq", HFILL }}, { &hf_printer_attributes_keep_printed_jobs, { "Keep printed jobs", "spoolss.printer_attributes.keep_printed_jobs", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_keep_printed_jobs), PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS, NULL, HFILL }}, { &hf_printer_attributes_do_complete_first, { "Do complete first", "spoolss.printer_attributes.do_complete_first", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_do_complete_first), PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST, NULL, HFILL }}, { &hf_printer_attributes_work_offline, { "Work offline (9x/ME only)", "spoolss.printer_attributes.work_offline", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_work_offline), PRINTER_ATTRIBUTE_WORK_OFFLINE, "Work offline", HFILL }}, { &hf_printer_attributes_enable_bidi, { "Enable bidi (9x/ME only)", "spoolss.printer_attributes.enable_bidi", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_enable_bidi), PRINTER_ATTRIBUTE_ENABLE_BIDI, "Enable bidi", HFILL }}, { &hf_printer_attributes_raw_only, { "Raw only", "spoolss.printer_attributes.raw_only", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_raw_only), PRINTER_ATTRIBUTE_RAW_ONLY, NULL, HFILL }}, { &hf_printer_attributes_published, { "Published", "spoolss.printer_attributes.published", FT_BOOLEAN, 32, TFS(&tfs_printer_attributes_published), PRINTER_ATTRIBUTE_PUBLISHED, NULL, 
HFILL }}, /* Timestamps */ { &hf_time_year, { "Year", "spoolss.time.year", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_month, { "Month", "spoolss.time.month", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_dow, { "Day of week", "spoolss.time.dow", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_day, { "Day", "spoolss.time.day", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_hour, { "Hour", "spoolss.time.hour", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_minute, { "Minute", "spoolss.time.minute", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_second, { "Second", "spoolss.time.second", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_time_msec, { "Millisecond", "spoolss.time.msec", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, /* Userlevel */ { &hf_userlevel_size, { "Size", "spoolss.userlevel.size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_userlevel_client, { "Client", "spoolss.userlevel.client", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_userlevel_user, { "User", "spoolss.userlevel.user", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_userlevel_build, { "Build", "spoolss.userlevel.build", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_userlevel_major, { "Major", "spoolss.userlevel.major", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_userlevel_minor, { "Minor", "spoolss.userlevel.minor", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_userlevel_processor, { "Processor", "spoolss.userlevel.processor", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, /* EnumprinterdataEx RPC */ { &hf_enumprinterdataex_name_offset, { "Name offset", "spoolss.enumprinterdataex.name_offset", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_name_len, { "Name len", "spoolss.enumprinterdataex.name_len", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_name, { "Name", "spoolss.enumprinterdataex.name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_val_offset, { "Value offset", "spoolss.enumprinterdataex.value_offset", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_val_len, { "Value len", "spoolss.enumprinterdataex.value_len", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_val_dword_high, { "DWORD value (high)", "spoolss.enumprinterdataex.val_dword.high", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_value_null, { "Value", "spoolss.enumprinterdataex.val_null", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_value_uint, { "Value", "spoolss.enumprinterdataex.val_uint", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_value_binary, { "Value", "spoolss.enumprinterdataex.val_binary", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_value_multi_sz, { "Value", "spoolss.enumprinterdataex.val_multi_sz", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }}, { &hf_enumprinterdataex_val_dword_low, { "DWORD value (low)", "spoolss.enumprinterdataex.val_dword.low", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }}, /* RouterReplyPrinter RPC */ { &hf_routerreplyprinter_condition, { "Condition", "spoolss.routerreplyprinter.condition", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_routerreplyprinter_unknown1, { "Unknown1", "spoolss.routerreplyprinter.unknown1", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_routerreplyprinter_changeid, { "Change id", "spoolss.routerreplyprinter.changeid", FT_UINT32, 
BASE_DEC, NULL, 0, NULL, HFILL }}, /* EnumPrinterKey RPC */ { &hf_keybuffer_size, { "Key Buffer size", "spoolss.keybuffer.size", FT_UINT32, BASE_DEC, NULL, 0x0, "Size of buffer", HFILL }}, /* SetJob RPC */ { &hf_setjob_cmd, { "Set job command", "spoolss.setjob.cmd", FT_UINT32, BASE_DEC, VALS(setjob_commands), 0x0, "Printer data name", HFILL }}, /* EnumJobs RPC */ { &hf_enumjobs_firstjob, { "First job", "spoolss.enumjobs.firstjob", FT_UINT32, BASE_DEC, NULL, 0x0, "Index of first job to return", HFILL }}, { &hf_enumjobs_numjobs, { "Num jobs", "spoolss.enumjobs.numjobs", FT_UINT32, BASE_DEC, NULL, 0x0, "Number of jobs to return", HFILL }}, /* Security descriptor buffer */ { &hf_secdescbuf_maxlen, { "Max len", "spoolss.secdescbuf.max_len", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_secdescbuf_undoc, { "Undocumented", "spoolss.secdescbuf.undoc", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_secdescbuf_len, { "Length", "spoolss.secdescbuf.len", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, /* Spool printer info */ { &hf_spool_printer_info_devmode_ptr, { "Devmode pointer", "spoolss.spoolprinterinfo.devmode_ptr", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_spool_printer_info_secdesc_ptr, { "Secdesc pointer", "spoolss.spoolprinterinfo.secdesc_ptr", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, /* WritePrinter RPC */ { &hf_writeprinter_numwritten, { "Num written", "spoolss.writeprinter.numwritten", FT_UINT32, BASE_DEC, NULL, 0x0, "Number of bytes written", HFILL }}, /* Setprinterdataex RPC */ { &hf_setprinterdataex_max_len, { "Max len", "spoolss.setprinterdataex.max_len", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_setprinterdataex_real_len, { "Real len", "spoolss.setprinterdataex.real_len", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_setprinterdataex_data, { "Data", "spoolss.setprinterdataex.data", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL }}, /* Specific access rights */ { &hf_access_required, { "Access required", "spoolss.access_required", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }}, { &hf_server_access_admin, { "Server admin", "spoolss.access_mask.server_admin", FT_BOOLEAN, 32, TFS(&tfs_set_notset), SERVER_ACCESS_ADMINISTER, NULL, HFILL }}, { &hf_server_access_enum, { "Server enum", "spoolss.access_mask.server_enum", FT_BOOLEAN, 32, TFS(&tfs_set_notset), SERVER_ACCESS_ENUMERATE, NULL, HFILL }}, { &hf_printer_access_admin, { "Printer admin", "spoolss.access_mask.printer_admin", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ACCESS_ADMINISTER, NULL, HFILL }}, { &hf_printer_access_use, { "Printer use", "spoolss.access_mask.printer_use", FT_BOOLEAN, 32, TFS(&tfs_set_notset), PRINTER_ACCESS_USE, NULL, HFILL }}, { &hf_job_access_admin, { "Job admin", "spoolss.access_mask.job_admin", FT_BOOLEAN, 32, TFS(&tfs_set_notset), JOB_ACCESS_ADMINISTER, NULL, HFILL }}, /* Printer information */ { &hf_printer_cjobs, { "CJobs", "spoolss.printer.cjobs", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_total_jobs, { "Total jobs", "spoolss.printer.total_jobs", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_total_bytes, { "Total bytes", "spoolss.printer.total_bytes", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_global_counter, { "Global counter", "spoolss.printer.global_counter", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_total_pages, { "Total pages", "spoolss.printer.total_pages", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_major_version, { "Major version", "spoolss.printer.major_version", FT_UINT16, 
BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_build_version, { "Build version", "spoolss.printer.build_version", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk7, { "Unknown 7", "spoolss.printer.unknown7", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk8, { "Unknown 8", "spoolss.printer.unknown8", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk9, { "Unknown 9", "spoolss.printer.unknown9", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_session_ctr, { "Session counter", "spoolss.printer.session_ctr", FT_UINT32, BASE_DEC, NULL, 0, "Sessopm counter", HFILL }}, { &hf_printer_unk11, { "Unknown 11", "spoolss.printer.unknown11", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_printer_errors, { "Printer errors", "spoolss.printer.printer_errors", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk13, { "Unknown 13", "spoolss.printer.unknown13", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk14, { "Unknown 14", "spoolss.printer.unknown14", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk15, { "Unknown 15", "spoolss.printer.unknown15", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk16, { "Unknown 16", "spoolss.printer.unknown16", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_changeid, { "Change id", "spoolss.printer.changeid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk18, { "Unknown 18", "spoolss.printer.unknown18", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk20, { "Unknown 20", "spoolss.printer.unknown20", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_c_setprinter, { "Csetprinter", "spoolss.printer.c_setprinter", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk22, { "Unknown 22", "spoolss.printer.unknown22", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk23, { "Unknown 23", "spoolss.printer.unknown23", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk24, { "Unknown 24", "spoolss.printer.unknown24", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk25, { "Unknown 25", "spoolss.printer.unknown25", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk26, { "Unknown 26", "spoolss.printer.unknown26", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk27, { "Unknown 27", "spoolss.printer.unknown27", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk28, { "Unknown 28", "spoolss.printer.unknown28", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_unk29, { "Unknown 29", "spoolss.printer.unknown29", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_flags, { "Flags", "spoolss.printer.flags", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL }}, { &hf_printer_priority, { "Priority", "spoolss.printer.priority", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_default_priority, { "Default Priority", "spoolss.printer.default_priority", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_averageppm, { "Average PPM", "spoolss.printer.averageppm", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_jobs, { "Jobs", "spoolss.printer.jobs", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }}, { &hf_printer_guid, { "GUID", "spoolss.printer.guid", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL }}, { &hf_printer_action, { "Action", "spoolss.printer.action", FT_UINT32, BASE_DEC, VALS(getprinter_action_vals), 0, NULL, HFILL }}, }; static gint *ett[] = { &ett_dcerpc_spoolss, &ett_PRINTER_DATATYPE, 
&ett_DEVMODE_CTR, &ett_DEVMODE, &ett_DEVMODE_fields, &ett_USER_LEVEL_CTR, &ett_USER_LEVEL_1, &ett_BUFFER, &ett_PRINTER_INFO, &ett_SPOOL_PRINTER_INFO_LEVEL, &ett_PRINTER_INFO_0, &ett_PRINTER_INFO_1, &ett_PRINTER_INFO_2, &ett_PRINTER_INFO_3, &ett_PRINTER_INFO_7, &ett_RELSTR, &ett_RELSTR_ARRAY, &ett_FORM_REL, &ett_FORM_CTR, &ett_FORM_1, &ett_JOB_INFO_1, &ett_JOB_INFO_2, &ett_SEC_DESC_BUF, &ett_SYSTEM_TIME, &ett_DOC_INFO_1, &ett_DOC_INFO, &ett_DOC_INFO_CTR, &ett_printerdata_value, &ett_printerdata_data, &ett_writeprinter_buffer, &ett_DRIVER_INFO_1, &ett_DRIVER_INFO_2, &ett_DRIVER_INFO_3, &ett_DRIVER_INFO_6, &ett_DRIVER_INFO_101, &ett_rffpcnex_flags, &ett_notify_options_flags, &ett_NOTIFY_INFO_DATA, &ett_NOTIFY_OPTION, &ett_printer_attributes, &ett_job_status, &ett_enumprinters_flags, &ett_PRINTER_DATA_CTR, &ett_printer_enumdataex_value, }; static ei_register_info ei[] = { { &ei_unimplemented_dissector, { "spoolss.unimplemented_dissector", PI_UNDECODED, PI_WARN, "Unimplemented dissector: SPOOLSS", EXPFILL }}, { &ei_unknown_data, { "spoolss.unknown_data", PI_UNDECODED, PI_WARN, "Unknown data follows", EXPFILL }}, { &ei_printer_info_level, { "spoolss.printer.unknown", PI_PROTOCOL, PI_WARN, "Unknown printer info level", EXPFILL }}, { &ei_spool_printer_info_level, { "spoolss.spool_printer.unknown", PI_PROTOCOL, PI_WARN, "Unknown spool printer info level", EXPFILL }}, { &ei_form_level, { "spoolss.form.level.unknown", PI_PROTOCOL, PI_WARN, "Unknown form info level", EXPFILL }}, { &ei_job_info_level, { "spoolss.job_info.level.unknown", PI_PROTOCOL, PI_WARN, "Unknown job info level", EXPFILL }}, { &ei_driver_info_level, { "spoolss.driver_info.level.unknown", PI_PROTOCOL, PI_WARN, "Unknown driver info level", EXPFILL }}, { &ei_level, { "spoolss.level.unknown", PI_PROTOCOL, PI_WARN, "Info level unknown", EXPFILL }}, { &ei_notify_info_data_type, { "spoolss.notify_info_data.type.unknown", PI_PROTOCOL, PI_WARN, "Unknown notify type", EXPFILL }}, { &ei_enumprinterdataex_value, { "spoolss.enumprinterdataex.val_unknown", PI_PROTOCOL, PI_WARN, "Unknown value type", EXPFILL }}, }; expert_module_t* expert_dcerpc_spoolss; proto_dcerpc_spoolss = proto_register_protocol( "Microsoft Spool Subsystem", "SPOOLSS", "spoolss"); proto_register_field_array(proto_dcerpc_spoolss, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); expert_dcerpc_spoolss = expert_register_protocol(proto_dcerpc_spoolss); expert_register_field_array(expert_dcerpc_spoolss, ei, array_length(ei)); } /* Protocol handoff */ static e_guid_t uuid_dcerpc_spoolss = { 0x12345678, 0x1234, 0xabcd, { 0xef, 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } }; static guint16 ver_dcerpc_spoolss = 1; void proto_reg_handoff_dcerpc_spoolss(void) { /* Register protocol as dcerpc */ dcerpc_init_uuid(proto_dcerpc_spoolss, ett_dcerpc_spoolss, &uuid_dcerpc_spoolss, ver_dcerpc_spoolss, dcerpc_spoolss_dissectors, hf_opnum); } /* * Editor modelines - http://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 8 * tab-width: 8 * indent-tabs-mode: t * End: * * vi: set shiftwidth=8 tabstop=8 noexpandtab: * :indentSize=8:tabSize=8:noTabs=false: */
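The field registrations above all follow the same hf_register_info pattern. As a minimal, self-contained sketch of that pattern (not taken from this dissector: the hf_example_level index, the example_level_vals table and the "spoolss.example.level" filter name are invented for illustration only), a single value-mapped field would be declared and registered like this:

/* Hypothetical field index and value_string table, for illustration only. */
static int hf_example_level = -1;

static const value_string example_level_vals[] = {
	{ 1, "Level 1" },
	{ 2, "Level 2" },
	{ 0, NULL }
};

static hf_register_info hf_example[] = {
	{ &hf_example_level,
	  { "Level", "spoolss.example.level", FT_UINT32, BASE_DEC,
	    VALS(example_level_vals), 0x0, "Hypothetical info level", HFILL }},
};

/* Registered from the protocol's register routine, the same way the large
 * array above is registered:
 *
 *	proto_register_field_array(proto_dcerpc_spoolss, hf_example,
 *				   array_length(hf_example));
 */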
./CrossVul/dataset_final_sorted/CWE-399/c/bad_5104_0
crossvul-cpp_data_bad_3486_14
/* * Performance event support - Freescale Embedded Performance Monitor * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * Copyright 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/reg_fsl_emb.h> #include <asm/pmc.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> struct cpu_hw_events { int n_events; int disabled; u8 pmcs_enabled; struct perf_event *event[MAX_HWEVENTS]; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); static struct fsl_emb_pmu *ppmu; /* Number of perf_events counting hardware events */ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); /* * If interrupts were soft-disabled when a PMU interrupt occurs, treat * it as an NMI. */ static inline int perf_intr_is_nmi(struct pt_regs *regs) { #ifdef __powerpc64__ return !regs->softe; #else return 0; #endif } static void perf_event_interrupt(struct pt_regs *regs); /* * Read one performance monitor counter (PMC). */ static unsigned long read_pmc(int idx) { unsigned long val; switch (idx) { case 0: val = mfpmr(PMRN_PMC0); break; case 1: val = mfpmr(PMRN_PMC1); break; case 2: val = mfpmr(PMRN_PMC2); break; case 3: val = mfpmr(PMRN_PMC3); break; default: printk(KERN_ERR "oops trying to read PMC%d\n", idx); val = 0; } return val; } /* * Write one PMC. */ static void write_pmc(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMC0, val); break; case 1: mtpmr(PMRN_PMC1, val); break; case 2: mtpmr(PMRN_PMC2, val); break; case 3: mtpmr(PMRN_PMC3, val); break; default: printk(KERN_ERR "oops trying to write PMC%d\n", idx); } isync(); } /* * Write one local control A register */ static void write_pmlca(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMLCA0, val); break; case 1: mtpmr(PMRN_PMLCA1, val); break; case 2: mtpmr(PMRN_PMLCA2, val); break; case 3: mtpmr(PMRN_PMLCA3, val); break; default: printk(KERN_ERR "oops trying to write PMLCA%d\n", idx); } isync(); } /* * Write one local control B register */ static void write_pmlcb(int idx, unsigned long val) { switch (idx) { case 0: mtpmr(PMRN_PMLCB0, val); break; case 1: mtpmr(PMRN_PMLCB1, val); break; case 2: mtpmr(PMRN_PMLCB2, val); break; case 3: mtpmr(PMRN_PMLCB3, val); break; default: printk(KERN_ERR "oops trying to write PMLCB%d\n", idx); } isync(); } static void fsl_emb_pmu_read(struct perf_event *event) { s64 val, delta, prev; if (event->hw.state & PERF_HES_STOPPED) return; /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. * Therefore we treat them like NMIs. */ do { prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); /* The counters are only 32 bits wide */ delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } /* * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. 
*/ static void fsl_emb_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } if (atomic_read(&num_events)) { /* * Set the 'freeze all counters' bit, and disable * interrupts. The barrier is to make sure the * mtpmr has been executed and the PMU has frozen * the events before we return. */ mtpmr(PMRN_PMGC0, PMGC0_FAC); isync(); } } local_irq_restore(flags); } /* * Re-enable all events if disable == 0. * If we were previously disabled and events were added, then * put the new config on the PMU. */ static void fsl_emb_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) goto out; cpuhw->disabled = 0; ppc_set_pmu_inuse(cpuhw->n_events != 0); if (cpuhw->n_events > 0) { mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); } out: local_irq_restore(flags); } static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[]) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; n++; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; n++; } } return n; } /* context locked on entry */ static int fsl_emb_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int ret = -EAGAIN; int num_counters = ppmu->n_counter; u64 val; int i; perf_pmu_disable(event->pmu); cpuhw = &get_cpu_var(cpu_hw_events); if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) num_counters = ppmu->n_restricted; /* * Allocate counters from top-down, so that restricted-capable * counters are kept free as long as possible. */ for (i = num_counters - 1; i >= 0; i--) { if (cpuhw->event[i]) continue; break; } if (i < 0) goto out; event->hw.idx = i; cpuhw->event[i] = event; ++cpuhw->n_events; val = 0; if (event->hw.sample_period) { s64 left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); if (!(flags & PERF_EF_START)) { event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; val = 0; } write_pmc(i, val); perf_event_update_userpage(event); write_pmlcb(i, event->hw.config >> 32); write_pmlca(i, event->hw.config_base); ret = 0; out: put_cpu_var(cpu_hw_events); perf_pmu_enable(event->pmu); return ret; } /* context locked on entry */ static void fsl_emb_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuhw; int i = event->hw.idx; perf_pmu_disable(event->pmu); if (i < 0) goto out; fsl_emb_pmu_read(event); cpuhw = &get_cpu_var(cpu_hw_events); WARN_ON(event != cpuhw->event[event->hw.idx]); write_pmlca(i, 0); write_pmlcb(i, 0); write_pmc(i, 0); cpuhw->event[i] = NULL; event->hw.idx = -1; /* * TODO: if at least one restricted event exists, and we * just freed up a non-restricted-capable counter, and * there is a restricted-capable counter occupied by * a non-restricted event, migrate that event to the * vacated counter. 
*/ cpuhw->n_events--; out: perf_pmu_enable(event->pmu); put_cpu_var(cpu_hw_events); } static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) { unsigned long flags; s64 left; if (event->hw.idx < 0 || !event->hw.sample_period) return; if (!(event->hw.state & PERF_HES_STOPPED)) return; if (ef_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); local_irq_save(flags); perf_pmu_disable(event->pmu); event->hw.state = 0; left = local64_read(&event->hw.period_left); write_pmc(event->hw.idx, left); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) { unsigned long flags; if (event->hw.idx < 0 || !event->hw.sample_period) return; if (event->hw.state & PERF_HES_STOPPED) return; local_irq_save(flags); perf_pmu_disable(event->pmu); fsl_emb_pmu_read(event); event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; write_pmc(event->hw.idx, 0); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * Release the PMU if this is the last perf_event. */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } /* * Translate a generic cache event_id config to a raw event_id code. */ static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; } static int fsl_emb_pmu_event_init(struct perf_event *event) { u64 ev; struct perf_event *events[MAX_HWEVENTS]; int n; int err; int num_restricted; int i; switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config = ppmu->xlate_event(ev); if (!(event->hw.config & FSL_EMB_EVENT_VALID)) return -EINVAL; /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. 
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, events); if (n < 0) return -EINVAL; } if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { num_restricted = 0; for (i = 0; i < n; i++) { if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED) num_restricted++; } if (num_restricted >= ppmu->n_restricted) return -EINVAL; } event->hw.idx = -1; event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | (u32)((ev << 16) & PMLCA_EVENT_MASK); if (event->attr.exclude_user) event->hw.config_base |= PMLCA_FCU; if (event->attr.exclude_kernel) event->hw.config_base |= PMLCA_FCS; if (event->attr.exclude_idle) return -ENOTSUPP; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); mtpmr(PMRN_PMGC0, PMGC0_FAC); isync(); } event->destroy = hw_perf_event_destroy; return err; } static struct pmu fsl_emb_pmu = { .pmu_enable = fsl_emb_pmu_enable, .pmu_disable = fsl_emb_pmu_disable, .event_init = fsl_emb_pmu_event_init, .add = fsl_emb_pmu_add, .del = fsl_emb_pmu_del, .start = fsl_emb_pmu_start, .stop = fsl_emb_pmu_stop, .read = fsl_emb_pmu_read, }; /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs, int nmi) { u64 period = event->hw.sample_period; s64 prev, delta, left; int record = 0; if (event->hw.state & PERF_HES_STOPPED) { write_pmc(event->hw.idx, 0); return; } /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); /* * See if the total period for this event has expired, * and update for the next period. */ val = 0; left = local64_read(&event->hw.period_left) - delta; if (period) { if (left <= 0) { left += period; if (left <= 0) left = period; record = 1; event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; } write_pmc(event->hw.idx, val); local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); /* * Finally record data if requested. */ if (record) { struct perf_sample_data data; perf_sample_data_init(&data, 0); data.period = event->hw.last_period; if (perf_event_overflow(event, nmi, &data, regs)) fsl_emb_pmu_stop(event, 0); } } static void perf_event_interrupt(struct pt_regs *regs) { int i; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event; unsigned long val; int found = 0; int nmi; nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); else irq_enter(); for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; val = read_pmc(i); if ((int)val < 0) { if (event) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs, nmi); } else { /* * Disabled counter is negative, * reset it just in case. 
*/ write_pmc(i, 0); } } } /* PMM will keep counters frozen until we return from the interrupt. */ mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); if (nmi) nmi_exit(); else irq_exit(); } void hw_perf_event_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(*cpuhw)); } int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) { if (ppmu) return -EBUSY; /* something's already registered */ ppmu = pmu; pr_info("%s performance monitor hardware support registered\n", pmu->name); perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); return 0; }
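From userspace, counters handled by this driver are reached through the generic perf_event_open() syscall; requesting PERF_TYPE_RAW ends up in fsl_emb_pmu_event_init() via the raw-event case above. The following is an illustrative sketch only, not part of the driver: the raw event code 0x1 is a placeholder (real codes are core-specific), and error handling is minimal.

/* Illustrative userspace sketch: open, enable and read one raw event. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* handled by fsl_emb_pmu_event_init() */
	attr.config = 0x1;		/* placeholder raw event code */
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* becomes PMLCA_FCS in the driver */

	/* no glibc wrapper: measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("raw event count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}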
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_14
crossvul-cpp_data_good_5856_0
/* * An async IO implementation for Linux * Written by Benjamin LaHaise <bcrl@kvack.org> * * Implements an efficient asynchronous io interface. * * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. * * See ../COPYING for licensing terms. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/aio_abi.h> #include <linux/export.h> #include <linux/syscalls.h> #include <linux/backing-dev.h> #include <linux/uio.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mmu_context.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/aio.h> #include <linux/highmem.h> #include <linux/workqueue.h> #include <linux/security.h> #include <linux/eventfd.h> #include <linux/blkdev.h> #include <linux/compat.h> #include <linux/anon_inodes.h> #include <linux/migrate.h> #include <linux/ramfs.h> #include <linux/percpu-refcount.h> #include <asm/kmap_types.h> #include <asm/uaccess.h> #include "internal.h" #define AIO_RING_MAGIC 0xa10a10a1 #define AIO_RING_COMPAT_FEATURES 1 #define AIO_RING_INCOMPAT_FEATURES 0 struct aio_ring { unsigned id; /* kernel internal index number */ unsigned nr; /* number of io_events */ unsigned head; unsigned tail; unsigned magic; unsigned compat_features; unsigned incompat_features; unsigned header_length; /* size of aio_ring */ struct io_event io_events[0]; }; /* 128 bytes + ring size */ #define AIO_RING_PAGES 8 struct kioctx_table { struct rcu_head rcu; unsigned nr; struct kioctx *table[]; }; struct kioctx_cpu { unsigned reqs_available; }; struct kioctx { struct percpu_ref users; atomic_t dead; struct percpu_ref reqs; unsigned long user_id; struct __percpu kioctx_cpu *cpu; /* * For percpu reqs_available, number of slots we move to/from global * counter at a time: */ unsigned req_batch; /* * This is what userspace passed to io_setup(), it's not used for * anything but counting against the global max_reqs quota. * * The real limit is nr_events - 1, which will be larger (see * aio_setup_ring()) */ unsigned max_reqs; /* Size of ringbuffer, in units of struct io_event */ unsigned nr_events; unsigned long mmap_base; unsigned long mmap_size; struct page **ring_pages; long nr_pages; struct work_struct free_work; struct { /* * This counts the number of available slots in the ringbuffer, * so we avoid overflowing it: it's decremented (if positive) * when allocating a kiocb and incremented when the resulting * io_event is pulled off the ringbuffer. * * We batch accesses to it with a percpu version. 
*/ atomic_t reqs_available; } ____cacheline_aligned_in_smp; struct { spinlock_t ctx_lock; struct list_head active_reqs; /* used for cancellation */ } ____cacheline_aligned_in_smp; struct { struct mutex ring_lock; wait_queue_head_t wait; } ____cacheline_aligned_in_smp; struct { unsigned tail; spinlock_t completion_lock; } ____cacheline_aligned_in_smp; struct page *internal_pages[AIO_RING_PAGES]; struct file *aio_ring_file; unsigned id; }; /*------ sysctl variables----*/ static DEFINE_SPINLOCK(aio_nr_lock); unsigned long aio_nr; /* current system wide number of aio requests */ unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ /*----end sysctl variables---*/ static struct kmem_cache *kiocb_cachep; static struct kmem_cache *kioctx_cachep; /* aio_setup * Creates the slab caches used by the aio routines, panic on * failure as this is done early during the boot sequence. */ static int __init aio_setup(void) { kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); return 0; } __initcall(aio_setup); static void put_aio_ring_file(struct kioctx *ctx) { struct file *aio_ring_file = ctx->aio_ring_file; if (aio_ring_file) { truncate_setsize(aio_ring_file->f_inode, 0); /* Prevent further access to the kioctx from migratepages */ spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock); aio_ring_file->f_inode->i_mapping->private_data = NULL; ctx->aio_ring_file = NULL; spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock); fput(aio_ring_file); } } static void aio_free_ring(struct kioctx *ctx) { int i; for (i = 0; i < ctx->nr_pages; i++) { pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, page_count(ctx->ring_pages[i])); put_page(ctx->ring_pages[i]); } put_aio_ring_file(ctx); if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) kfree(ctx->ring_pages); } static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) { vma->vm_ops = &generic_file_vm_ops; return 0; } static const struct file_operations aio_ring_fops = { .mmap = aio_ring_mmap, }; static int aio_set_page_dirty(struct page *page) { return 0; } #if IS_ENABLED(CONFIG_MIGRATION) static int aio_migratepage(struct address_space *mapping, struct page *new, struct page *old, enum migrate_mode mode) { struct kioctx *ctx; unsigned long flags; int rc; /* Writeback must be complete */ BUG_ON(PageWriteback(old)); put_page(old); rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); if (rc != MIGRATEPAGE_SUCCESS) { get_page(old); return rc; } get_page(new); /* We can potentially race against kioctx teardown here. Use the * address_space's private data lock to protect the mapping's * private_data. 
*/ spin_lock(&mapping->private_lock); ctx = mapping->private_data; if (ctx) { pgoff_t idx; spin_lock_irqsave(&ctx->completion_lock, flags); migrate_page_copy(new, old); idx = old->index; if (idx < (pgoff_t)ctx->nr_pages) ctx->ring_pages[idx] = new; spin_unlock_irqrestore(&ctx->completion_lock, flags); } else rc = -EBUSY; spin_unlock(&mapping->private_lock); return rc; } #endif static const struct address_space_operations aio_ctx_aops = { .set_page_dirty = aio_set_page_dirty, #if IS_ENABLED(CONFIG_MIGRATION) .migratepage = aio_migratepage, #endif }; static int aio_setup_ring(struct kioctx *ctx) { struct aio_ring *ring; unsigned nr_events = ctx->max_reqs; struct mm_struct *mm = current->mm; unsigned long size, populate; int nr_pages; int i; struct file *file; /* Compensate for the ring buffer's head/tail overlap entry */ nr_events += 2; /* 1 is required, 2 for good luck */ size = sizeof(struct aio_ring); size += sizeof(struct io_event) * nr_events; nr_pages = PFN_UP(size); if (nr_pages < 0) return -EINVAL; file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR); if (IS_ERR(file)) { ctx->aio_ring_file = NULL; return -EAGAIN; } file->f_inode->i_mapping->a_ops = &aio_ctx_aops; file->f_inode->i_mapping->private_data = ctx; file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages; for (i = 0; i < nr_pages; i++) { struct page *page; page = find_or_create_page(file->f_inode->i_mapping, i, GFP_HIGHUSER | __GFP_ZERO); if (!page) break; pr_debug("pid(%d) page[%d]->count=%d\n", current->pid, i, page_count(page)); SetPageUptodate(page); SetPageDirty(page); unlock_page(page); } ctx->aio_ring_file = file; nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); ctx->ring_pages = ctx->internal_pages; if (nr_pages > AIO_RING_PAGES) { ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!ctx->ring_pages) return -ENOMEM; } ctx->mmap_size = nr_pages * PAGE_SIZE; pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); down_write(&mm->mmap_sem); ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 0, &populate); if (IS_ERR((void *)ctx->mmap_base)) { up_write(&mm->mmap_sem); ctx->mmap_size = 0; aio_free_ring(ctx); return -EAGAIN; } pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); /* We must do this while still holding mmap_sem for write, as we * need to be protected against userspace attempting to mremap() * or munmap() the ring buffer. */ ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, 1, 0, ctx->ring_pages, NULL); /* Dropping the reference here is safe as the page cache will hold * onto the pages for us. It is also required so that page migration * can unmap the pages and get the right reference count. 
*/ for (i = 0; i < ctx->nr_pages; i++) put_page(ctx->ring_pages[i]); up_write(&mm->mmap_sem); if (unlikely(ctx->nr_pages != nr_pages)) { aio_free_ring(ctx); return -EAGAIN; } ctx->user_id = ctx->mmap_base; ctx->nr_events = nr_events; /* trusted copy */ ring = kmap_atomic(ctx->ring_pages[0]); ring->nr = nr_events; /* user copy */ ring->id = ~0U; ring->head = ring->tail = 0; ring->magic = AIO_RING_MAGIC; ring->compat_features = AIO_RING_COMPAT_FEATURES; ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; ring->header_length = sizeof(struct aio_ring); kunmap_atomic(ring); flush_dcache_page(ctx->ring_pages[0]); return 0; } #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) { struct kioctx *ctx = req->ki_ctx; unsigned long flags; spin_lock_irqsave(&ctx->ctx_lock, flags); if (!req->ki_list.next) list_add(&req->ki_list, &ctx->active_reqs); req->ki_cancel = cancel; spin_unlock_irqrestore(&ctx->ctx_lock, flags); } EXPORT_SYMBOL(kiocb_set_cancel_fn); static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) { kiocb_cancel_fn *old, *cancel; /* * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it * actually has a cancel function, hence the cmpxchg() */ cancel = ACCESS_ONCE(kiocb->ki_cancel); do { if (!cancel || cancel == KIOCB_CANCELLED) return -EINVAL; old = cancel; cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); } while (cancel != old); return cancel(kiocb); } static void free_ioctx(struct work_struct *work) { struct kioctx *ctx = container_of(work, struct kioctx, free_work); pr_debug("freeing %p\n", ctx); aio_free_ring(ctx); free_percpu(ctx->cpu); kmem_cache_free(kioctx_cachep, ctx); } static void free_ioctx_reqs(struct percpu_ref *ref) { struct kioctx *ctx = container_of(ref, struct kioctx, reqs); INIT_WORK(&ctx->free_work, free_ioctx); schedule_work(&ctx->free_work); } /* * When this function runs, the kioctx has been removed from the "hash table" * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be. */ static void free_ioctx_users(struct percpu_ref *ref) { struct kioctx *ctx = container_of(ref, struct kioctx, users); struct kiocb *req; spin_lock_irq(&ctx->ctx_lock); while (!list_empty(&ctx->active_reqs)) { req = list_first_entry(&ctx->active_reqs, struct kiocb, ki_list); list_del_init(&req->ki_list); kiocb_cancel(ctx, req); } spin_unlock_irq(&ctx->ctx_lock); percpu_ref_kill(&ctx->reqs); percpu_ref_put(&ctx->reqs); } static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) { unsigned i, new_nr; struct kioctx_table *table, *old; struct aio_ring *ring; spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); while (1) { if (table) for (i = 0; i < table->nr; i++) if (!table->table[i]) { ctx->id = i; table->table[i] = ctx; rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); ring = kmap_atomic(ctx->ring_pages[0]); ring->id = ctx->id; kunmap_atomic(ring); return 0; } new_nr = (table ? 
table->nr : 1) * 4; rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * new_nr, GFP_KERNEL); if (!table) return -ENOMEM; table->nr = new_nr; spin_lock(&mm->ioctx_lock); rcu_read_lock(); old = rcu_dereference(mm->ioctx_table); if (!old) { rcu_assign_pointer(mm->ioctx_table, table); } else if (table->nr > old->nr) { memcpy(table->table, old->table, old->nr * sizeof(struct kioctx *)); rcu_assign_pointer(mm->ioctx_table, table); kfree_rcu(old, rcu); } else { kfree(table); table = old; } } } static void aio_nr_sub(unsigned nr) { spin_lock(&aio_nr_lock); if (WARN_ON(aio_nr - nr > aio_nr)) aio_nr = 0; else aio_nr -= nr; spin_unlock(&aio_nr_lock); } /* ioctx_alloc * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. */ static struct kioctx *ioctx_alloc(unsigned nr_events) { struct mm_struct *mm = current->mm; struct kioctx *ctx; int err = -ENOMEM; /* * We keep track of the number of available ringbuffer slots, to prevent * overflow (reqs_available), and we also use percpu counters for this. * * So since up to half the slots might be on other cpu's percpu counters * and unavailable, double nr_events so userspace sees what they * expected: additionally, we move req_batch slots to/from percpu * counters at a time, so make sure that isn't 0: */ nr_events = max(nr_events, num_possible_cpus() * 4); nr_events *= 2; /* Prevent overflows */ if ((nr_events > (0x10000000U / sizeof(struct io_event))) || (nr_events > (0x10000000U / sizeof(struct kiocb)))) { pr_debug("ENOMEM: nr_events too high\n"); return ERR_PTR(-EINVAL); } if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) return ERR_PTR(-EAGAIN); ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); ctx->max_reqs = nr_events; if (percpu_ref_init(&ctx->users, free_ioctx_users)) goto err; if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs)) goto err; spin_lock_init(&ctx->ctx_lock); spin_lock_init(&ctx->completion_lock); mutex_init(&ctx->ring_lock); init_waitqueue_head(&ctx->wait); INIT_LIST_HEAD(&ctx->active_reqs); ctx->cpu = alloc_percpu(struct kioctx_cpu); if (!ctx->cpu) goto err; if (aio_setup_ring(ctx) < 0) goto err; atomic_set(&ctx->reqs_available, ctx->nr_events - 1); ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); if (ctx->req_batch < 1) ctx->req_batch = 1; /* limit the number of system wide aios */ spin_lock(&aio_nr_lock); if (aio_nr + nr_events > (aio_max_nr * 2UL) || aio_nr + nr_events < aio_nr) { spin_unlock(&aio_nr_lock); err = -EAGAIN; goto err; } aio_nr += ctx->max_reqs; spin_unlock(&aio_nr_lock); percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ err = ioctx_add_table(ctx, mm); if (err) goto err_cleanup; pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", ctx, ctx->user_id, mm, ctx->nr_events); return ctx; err_cleanup: aio_nr_sub(ctx->max_reqs); err: free_percpu(ctx->cpu); free_percpu(ctx->reqs.pcpu_count); free_percpu(ctx->users.pcpu_count); kmem_cache_free(kioctx_cachep, ctx); pr_debug("error allocating ioctx %d\n", err); return ERR_PTR(err); } /* kill_ioctx * Cancels all outstanding aio requests on an aio context. Used * when the processes owning a context have all exited to encourage * the rapid destruction of the kioctx. 
*/ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) { if (!atomic_xchg(&ctx->dead, 1)) { struct kioctx_table *table; spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); WARN_ON(ctx != table->table[ctx->id]); table->table[ctx->id] = NULL; rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); /* percpu_ref_kill() will do the necessary call_rcu() */ wake_up_all(&ctx->wait); /* * It'd be more correct to do this in free_ioctx(), after all * the outstanding kiocbs have finished - but by then io_destroy * has already returned, so io_setup() could potentially return * -EAGAIN with no ioctxs actually in use (as far as userspace * could tell). */ aio_nr_sub(ctx->max_reqs); if (ctx->mmap_size) vm_munmap(ctx->mmap_base, ctx->mmap_size); percpu_ref_kill(&ctx->users); } } /* wait_on_sync_kiocb: * Waits on the given sync kiocb to complete. */ ssize_t wait_on_sync_kiocb(struct kiocb *req) { while (!req->ki_ctx) { set_current_state(TASK_UNINTERRUPTIBLE); if (req->ki_ctx) break; io_schedule(); } __set_current_state(TASK_RUNNING); return req->ki_user_data; } EXPORT_SYMBOL(wait_on_sync_kiocb); /* * exit_aio: called when the last user of mm goes away. At this point, there is * no way for any new requests to be submited or any of the io_* syscalls to be * called on the context. * * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on * them. */ void exit_aio(struct mm_struct *mm) { struct kioctx_table *table; struct kioctx *ctx; unsigned i = 0; while (1) { rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); do { if (!table || i >= table->nr) { rcu_read_unlock(); rcu_assign_pointer(mm->ioctx_table, NULL); if (table) kfree(table); return; } ctx = table->table[i++]; } while (!ctx); rcu_read_unlock(); /* * We don't need to bother with munmap() here - * exit_mmap(mm) is coming and it'll unmap everything. * Since aio_free_ring() uses non-zero ->mmap_size * as indicator that it needs to unmap the area, * just set it to 0; aio_free_ring() is the only * place that uses ->mmap_size, so it's safe. */ ctx->mmap_size = 0; kill_ioctx(mm, ctx); } } static void put_reqs_available(struct kioctx *ctx, unsigned nr) { struct kioctx_cpu *kcpu; preempt_disable(); kcpu = this_cpu_ptr(ctx->cpu); kcpu->reqs_available += nr; while (kcpu->reqs_available >= ctx->req_batch * 2) { kcpu->reqs_available -= ctx->req_batch; atomic_add(ctx->req_batch, &ctx->reqs_available); } preempt_enable(); } static bool get_reqs_available(struct kioctx *ctx) { struct kioctx_cpu *kcpu; bool ret = false; preempt_disable(); kcpu = this_cpu_ptr(ctx->cpu); if (!kcpu->reqs_available) { int old, avail = atomic_read(&ctx->reqs_available); do { if (avail < ctx->req_batch) goto out; old = avail; avail = atomic_cmpxchg(&ctx->reqs_available, avail, avail - ctx->req_batch); } while (avail != old); kcpu->reqs_available += ctx->req_batch; } ret = true; kcpu->reqs_available--; out: preempt_enable(); return ret; } /* aio_get_req * Allocate a slot for an aio request. * Returns NULL if no requests are free. 
*/ static inline struct kiocb *aio_get_req(struct kioctx *ctx) { struct kiocb *req; if (!get_reqs_available(ctx)) return NULL; req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); if (unlikely(!req)) goto out_put; percpu_ref_get(&ctx->reqs); req->ki_ctx = ctx; return req; out_put: put_reqs_available(ctx, 1); return NULL; } static void kiocb_free(struct kiocb *req) { if (req->ki_filp) fput(req->ki_filp); if (req->ki_eventfd != NULL) eventfd_ctx_put(req->ki_eventfd); kmem_cache_free(kiocb_cachep, req); } static struct kioctx *lookup_ioctx(unsigned long ctx_id) { struct aio_ring __user *ring = (void __user *)ctx_id; struct mm_struct *mm = current->mm; struct kioctx *ctx, *ret = NULL; struct kioctx_table *table; unsigned id; if (get_user(id, &ring->id)) return NULL; rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); if (!table || id >= table->nr) goto out; ctx = table->table[id]; if (ctx && ctx->user_id == ctx_id) { percpu_ref_get(&ctx->users); ret = ctx; } out: rcu_read_unlock(); return ret; } /* aio_complete * Called when the io request on the given iocb is complete. */ void aio_complete(struct kiocb *iocb, long res, long res2) { struct kioctx *ctx = iocb->ki_ctx; struct aio_ring *ring; struct io_event *ev_page, *event; unsigned long flags; unsigned tail, pos; /* * Special case handling for sync iocbs: * - events go directly into the iocb for fast handling * - the sync task with the iocb in its stack holds the single iocb * ref, no other paths have a way to get another ref * - the sync task helpfully left a reference to itself in the iocb */ if (is_sync_kiocb(iocb)) { iocb->ki_user_data = res; smp_wmb(); iocb->ki_ctx = ERR_PTR(-EXDEV); wake_up_process(iocb->ki_obj.tsk); return; } if (iocb->ki_list.next) { unsigned long flags; spin_lock_irqsave(&ctx->ctx_lock, flags); list_del(&iocb->ki_list); spin_unlock_irqrestore(&ctx->ctx_lock, flags); } /* * Add a completion event to the ring buffer. Must be done holding * ctx->completion_lock to prevent other code from messing with the tail * pointer since we might be called from irq context. */ spin_lock_irqsave(&ctx->completion_lock, flags); tail = ctx->tail; pos = tail + AIO_EVENTS_OFFSET; if (++tail >= ctx->nr_events) tail = 0; ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); event = ev_page + pos % AIO_EVENTS_PER_PAGE; event->obj = (u64)(unsigned long)iocb->ki_obj.user; event->data = iocb->ki_user_data; event->res = res; event->res2 = res2; kunmap_atomic(ev_page); flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, res, res2); /* after flagging the request as done, we * must never even look at it again */ smp_wmb(); /* make event visible before updating tail */ ctx->tail = tail; ring = kmap_atomic(ctx->ring_pages[0]); ring->tail = tail; kunmap_atomic(ring); flush_dcache_page(ctx->ring_pages[0]); spin_unlock_irqrestore(&ctx->completion_lock, flags); pr_debug("added to ring %p at [%u]\n", iocb, tail); /* * Check if the user asked us to deliver the result through an * eventfd. The eventfd_signal() function is safe to be called * from IRQ context. */ if (iocb->ki_eventfd != NULL) eventfd_signal(iocb->ki_eventfd, 1); /* everything turned out well, dispose of the aiocb. */ kiocb_free(iocb); /* * We have to order our ring_info tail store above and test * of the wait list below outside the wait lock. This is * like in wake_up_bit() where clearing a bit has to be * ordered with the unlocked test. 
*/ smp_mb(); if (waitqueue_active(&ctx->wait)) wake_up(&ctx->wait); percpu_ref_put(&ctx->reqs); } EXPORT_SYMBOL(aio_complete); /* aio_read_events * Pull an event off of the ioctx's event ring. Returns the number of * events fetched */ static long aio_read_events_ring(struct kioctx *ctx, struct io_event __user *event, long nr) { struct aio_ring *ring; unsigned head, tail, pos; long ret = 0; int copy_ret; mutex_lock(&ctx->ring_lock); ring = kmap_atomic(ctx->ring_pages[0]); head = ring->head; tail = ring->tail; kunmap_atomic(ring); pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); if (head == tail) goto out; while (ret < nr) { long avail; struct io_event *ev; struct page *page; avail = (head <= tail ? tail : ctx->nr_events) - head; if (head == tail) break; avail = min(avail, nr - ret); avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); pos = head + AIO_EVENTS_OFFSET; page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; pos %= AIO_EVENTS_PER_PAGE; ev = kmap(page); copy_ret = copy_to_user(event + ret, ev + pos, sizeof(*ev) * avail); kunmap(page); if (unlikely(copy_ret)) { ret = -EFAULT; goto out; } ret += avail; head += avail; head %= ctx->nr_events; } ring = kmap_atomic(ctx->ring_pages[0]); ring->head = head; kunmap_atomic(ring); flush_dcache_page(ctx->ring_pages[0]); pr_debug("%li h%u t%u\n", ret, head, tail); put_reqs_available(ctx, ret); out: mutex_unlock(&ctx->ring_lock); return ret; } static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, struct io_event __user *event, long *i) { long ret = aio_read_events_ring(ctx, event + *i, nr - *i); if (ret > 0) *i += ret; if (unlikely(atomic_read(&ctx->dead))) ret = -EINVAL; if (!*i) *i = ret; return ret < 0 || *i >= min_nr; } static long read_events(struct kioctx *ctx, long min_nr, long nr, struct io_event __user *event, struct timespec __user *timeout) { ktime_t until = { .tv64 = KTIME_MAX }; long ret = 0; if (timeout) { struct timespec ts; if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) return -EFAULT; until = timespec_to_ktime(ts); } /* * Note that aio_read_events() is being called as the conditional - i.e. * we're calling it after prepare_to_wait() has set task state to * TASK_INTERRUPTIBLE. * * But aio_read_events() can block, and if it blocks it's going to flip * the task state back to TASK_RUNNING. * * This should be ok, provided it doesn't flip the state back to * TASK_RUNNING and return 0 too much - that causes us to spin. That * will only happen if the mutex_lock() call blocks, and we then find * the ringbuffer empty. So in practice we should be ok, but it's * something to be aware of when touching this code. */ wait_event_interruptible_hrtimeout(ctx->wait, aio_read_events(ctx, min_nr, nr, event, &ret), until); if (!ret && signal_pending(current)) ret = -EINTR; return ret; } /* sys_io_setup: * Create an aio_context capable of receiving at least nr_events. * ctxp must not point to an aio_context that already exists, and * must be initialized to 0 prior to the call. On successful * creation of the aio_context, *ctxp is filled in with the resulting * handle. May fail with -EINVAL if *ctxp is not initialized, * if the specified nr_events exceeds internal limits. May fail * with -EAGAIN if the specified nr_events exceeds the user's limit * of available events. May fail with -ENOMEM if insufficient kernel * resources are available. May fail with -EFAULT if an invalid * pointer is passed for ctxp. Will fail with -ENOSYS if not * implemented. 
*/ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) { struct kioctx *ioctx = NULL; unsigned long ctx; long ret; ret = get_user(ctx, ctxp); if (unlikely(ret)) goto out; ret = -EINVAL; if (unlikely(ctx || nr_events == 0)) { pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", ctx, nr_events); goto out; } ioctx = ioctx_alloc(nr_events); ret = PTR_ERR(ioctx); if (!IS_ERR(ioctx)) { ret = put_user(ioctx->user_id, ctxp); if (ret) kill_ioctx(current->mm, ioctx); percpu_ref_put(&ioctx->users); } out: return ret; } /* sys_io_destroy: * Destroy the aio_context specified. May cancel any outstanding * AIOs and block on completion. Will fail with -ENOSYS if not * implemented. May fail with -EINVAL if the context pointed to * is invalid. */ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) { struct kioctx *ioctx = lookup_ioctx(ctx); if (likely(NULL != ioctx)) { kill_ioctx(current->mm, ioctx); percpu_ref_put(&ioctx->users); return 0; } pr_debug("EINVAL: io_destroy: invalid context id\n"); return -EINVAL; } typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, unsigned long, loff_t); static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, int rw, char __user *buf, unsigned long *nr_segs, struct iovec **iovec, bool compat) { ssize_t ret; *nr_segs = kiocb->ki_nbytes; #ifdef CONFIG_COMPAT if (compat) ret = compat_rw_copy_check_uvector(rw, (struct compat_iovec __user *)buf, *nr_segs, 1, *iovec, iovec); else #endif ret = rw_copy_check_uvector(rw, (struct iovec __user *)buf, *nr_segs, 1, *iovec, iovec); if (ret < 0) return ret; /* ki_nbytes now reflect bytes instead of segs */ kiocb->ki_nbytes = ret; return 0; } static ssize_t aio_setup_single_vector(struct kiocb *kiocb, int rw, char __user *buf, unsigned long *nr_segs, struct iovec *iovec) { if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) return -EFAULT; iovec->iov_base = buf; iovec->iov_len = kiocb->ki_nbytes; *nr_segs = 1; return 0; } /* * aio_setup_iocb: * Performs the initial checks and aio retry method * setup for the kiocb at the time of io submission. */ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, char __user *buf, bool compat) { struct file *file = req->ki_filp; ssize_t ret; unsigned long nr_segs; int rw; fmode_t mode; aio_rw_op *rw_op; struct iovec inline_vec, *iovec = &inline_vec; switch (opcode) { case IOCB_CMD_PREAD: case IOCB_CMD_PREADV: mode = FMODE_READ; rw = READ; rw_op = file->f_op->aio_read; goto rw_common; case IOCB_CMD_PWRITE: case IOCB_CMD_PWRITEV: mode = FMODE_WRITE; rw = WRITE; rw_op = file->f_op->aio_write; goto rw_common; rw_common: if (unlikely(!(file->f_mode & mode))) return -EBADF; if (!rw_op) return -EINVAL; ret = (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) ? aio_setup_vectored_rw(req, rw, buf, &nr_segs, &iovec, compat) : aio_setup_single_vector(req, rw, buf, &nr_segs, iovec); if (ret) return ret; ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); if (ret < 0) { if (iovec != &inline_vec) kfree(iovec); return ret; } req->ki_nbytes = ret; /* XXX: move/kill - rw_verify_area()? 
*/ /* This matches the pread()/pwrite() logic */ if (req->ki_pos < 0) { ret = -EINVAL; break; } if (rw == WRITE) file_start_write(file); ret = rw_op(req, iovec, nr_segs, req->ki_pos); if (rw == WRITE) file_end_write(file); break; case IOCB_CMD_FDSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 1); break; case IOCB_CMD_FSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 0); break; default: pr_debug("EINVAL: no operation provided\n"); return -EINVAL; } if (iovec != &inline_vec) kfree(iovec); if (ret != -EIOCBQUEUED) { /* * There's no easy way to restart the syscall since other AIO's * may be already running. Just fail this IO with EINTR. */ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK)) ret = -EINTR; aio_complete(req, ret, 0); } return 0; } static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, struct iocb *iocb, bool compat) { struct kiocb *req; ssize_t ret; /* enforce forwards compatibility on users */ if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { pr_debug("EINVAL: reserve field set\n"); return -EINVAL; } /* prevent overflows */ if (unlikely( (iocb->aio_buf != (unsigned long)iocb->aio_buf) || (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || ((ssize_t)iocb->aio_nbytes < 0) )) { pr_debug("EINVAL: io_submit: overflow check\n"); return -EINVAL; } req = aio_get_req(ctx); if (unlikely(!req)) return -EAGAIN; req->ki_filp = fget(iocb->aio_fildes); if (unlikely(!req->ki_filp)) { ret = -EBADF; goto out_put_req; } if (iocb->aio_flags & IOCB_FLAG_RESFD) { /* * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an * instance of the file* now. The file descriptor must be * an eventfd() fd, and will be signaled for each completed * event using the eventfd_signal() function. */ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); if (IS_ERR(req->ki_eventfd)) { ret = PTR_ERR(req->ki_eventfd); req->ki_eventfd = NULL; goto out_put_req; } } ret = put_user(KIOCB_KEY, &user_iocb->aio_key); if (unlikely(ret)) { pr_debug("EFAULT: aio_key\n"); goto out_put_req; } req->ki_obj.user = user_iocb; req->ki_user_data = iocb->aio_data; req->ki_pos = iocb->aio_offset; req->ki_nbytes = iocb->aio_nbytes; ret = aio_run_iocb(req, iocb->aio_lio_opcode, (char __user *)(unsigned long)iocb->aio_buf, compat); if (ret) goto out_put_req; return 0; out_put_req: put_reqs_available(ctx, 1); percpu_ref_put(&ctx->reqs); kiocb_free(req); return ret; } long do_io_submit(aio_context_t ctx_id, long nr, struct iocb __user *__user *iocbpp, bool compat) { struct kioctx *ctx; long ret = 0; int i = 0; struct blk_plug plug; if (unlikely(nr < 0)) return -EINVAL; if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) nr = LONG_MAX/sizeof(*iocbpp); if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) return -EFAULT; ctx = lookup_ioctx(ctx_id); if (unlikely(!ctx)) { pr_debug("EINVAL: invalid context id\n"); return -EINVAL; } blk_start_plug(&plug); /* * AKPM: should this return a partial result if some of the IOs were * successfully submitted? */ for (i=0; i<nr; i++) { struct iocb __user *user_iocb; struct iocb tmp; if (unlikely(__get_user(user_iocb, iocbpp + i))) { ret = -EFAULT; break; } if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { ret = -EFAULT; break; } ret = io_submit_one(ctx, user_iocb, &tmp, compat); if (ret) break; } blk_finish_plug(&plug); percpu_ref_put(&ctx->users); return i ? 
i : ret; } /* sys_io_submit: * Queue the nr iocbs pointed to by iocbpp for processing. Returns * the number of iocbs queued. May return -EINVAL if the aio_context * specified by ctx_id is invalid, if nr is < 0, if the iocb at * *iocbpp[0] is not properly initialized, if the operation specified * is invalid for the file descriptor in the iocb. May fail with * -EFAULT if any of the data structures point to invalid data. May * fail with -EBADF if the file descriptor specified in the first * iocb is invalid. May fail with -EAGAIN if insufficient resources * are available to queue any iocbs. Will return 0 if nr is 0. Will * fail with -ENOSYS if not implemented. */ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, struct iocb __user * __user *, iocbpp) { return do_io_submit(ctx_id, nr, iocbpp, 0); } /* lookup_kiocb * Finds a given iocb for cancellation. */ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) { struct list_head *pos; assert_spin_locked(&ctx->ctx_lock); if (key != KIOCB_KEY) return NULL; /* TODO: use a hash or array, this sucks. */ list_for_each(pos, &ctx->active_reqs) { struct kiocb *kiocb = list_kiocb(pos); if (kiocb->ki_obj.user == iocb) return kiocb; } return NULL; } /* sys_io_cancel: * Attempts to cancel an iocb previously passed to io_submit. If * the operation is successfully cancelled, the resulting event is * copied into the memory pointed to by result without being placed * into the completion queue and 0 is returned. May fail with * -EFAULT if any of the data structures pointed to are invalid. * May fail with -EINVAL if aio_context specified by ctx_id is * invalid. May fail with -EAGAIN if the iocb specified was not * cancelled. Will fail with -ENOSYS if not implemented. */ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result) { struct kioctx *ctx; struct kiocb *kiocb; u32 key; int ret; ret = get_user(key, &iocb->aio_key); if (unlikely(ret)) return -EFAULT; ctx = lookup_ioctx(ctx_id); if (unlikely(!ctx)) return -EINVAL; spin_lock_irq(&ctx->ctx_lock); kiocb = lookup_kiocb(ctx, iocb, key); if (kiocb) ret = kiocb_cancel(ctx, kiocb); else ret = -EINVAL; spin_unlock_irq(&ctx->ctx_lock); if (!ret) { /* * The result argument is no longer used - the io_event is * always delivered via the ring buffer. -EINPROGRESS indicates * cancellation is progress: */ ret = -EINPROGRESS; } percpu_ref_put(&ctx->users); return ret; } /* io_getevents: * Attempts to read at least min_nr events and up to nr events from * the completion queue for the aio_context specified by ctx_id. If * it succeeds, the number of read events is returned. May fail with * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is * out of range, if timeout is out of range. May fail with -EFAULT * if any of the memory specified is invalid. May return 0 or * < min_nr if the timeout specified by timeout has elapsed * before sufficient events are available, where timeout == NULL * specifies an infinite timeout. Note that the timeout pointed to by * timeout is relative. Will fail with -ENOSYS if not implemented. */ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, long, min_nr, long, nr, struct io_event __user *, events, struct timespec __user *, timeout) { struct kioctx *ioctx = lookup_ioctx(ctx_id); long ret = -EINVAL; if (likely(ioctx)) { if (likely(min_nr <= nr && min_nr >= 0)) ret = read_events(ioctx, min_nr, nr, events, timeout); percpu_ref_put(&ioctx->users); } return ret; }
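/*
 * Illustrative userspace sketch -- not part of the kernel file above.  It
 * drives the io_setup()/io_submit()/io_getevents()/io_destroy() syscalls
 * implemented in fs/aio.c through syscall(2), since glibc does not wrap
 * them.  "testfile" is a hypothetical input path and the static wrapper
 * names are local conveniences; error handling is deliberately minimal.
 * The context handle must be zero before io_setup(), matching the check
 * in sys_io_setup() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static long io_setup(unsigned nr, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, nr, ctxp);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx, long min_nr, long nr,
			 struct io_event *events, struct timespec *tmo)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, tmo);
}
static long io_destroy(aio_context_t ctx)
{
	return syscall(__NR_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;		/* must be zeroed before io_setup() */
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd;

	if (io_setup(128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	fd = open("testfile", O_RDONLY);	/* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* one asynchronous pread() of 4096 bytes at offset 0 */
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (io_submit(ctx, 1, cbs) != 1) {
		perror("io_submit");
		return 1;
	}

	/* block until the completion event has been pulled off the ring */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1) {
		perror("io_getevents");
		return 1;
	}
	printf("read returned %lld\n", (long long)ev.res);

	close(fd);
	io_destroy(ctx);
	return 0;
}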
./CrossVul/dataset_final_sorted/CWE-399/c/good_5856_0
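/*
 * Second illustrative sketch, again not part of fs/aio.c itself: completion
 * notification through an eventfd, i.e. the IOCB_FLAG_RESFD/aio_resfd path
 * that io_submit_one() wires up and that aio_complete() signals via
 * eventfd_signal().  The eventfd could equally be handed to poll()/epoll;
 * here it is simply read().  "testfile" is a hypothetical input path.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	unsigned long long done;
	char buf[4096];
	int fd, efd;

	if (syscall(__NR_io_setup, 8, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	efd = eventfd(0, 0);
	fd = open("testfile", O_RDONLY);	/* hypothetical path */
	if (efd < 0 || fd < 0) {
		perror("eventfd/open");
		return 1;
	}

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_flags = IOCB_FLAG_RESFD;		/* signal efd on completion */
	cb.aio_resfd = efd;

	if (syscall(__NR_io_submit, ctx, 1L, cbs) != 1) {
		perror("io_submit");
		return 1;
	}

	/* blocks until aio_complete() has called eventfd_signal() */
	if (read(efd, &done, sizeof(done)) == sizeof(done))
		printf("%llu completion(s) signalled\n", done);

	/* the io_event itself is still reaped from the ring as usual;
	 * min_nr = 0 because the eventfd already told us it is there */
	if (syscall(__NR_io_getevents, ctx, 0L, 1L, &ev, NULL) == 1)
		printf("read returned %lld\n", (long long)ev.res);

	close(fd);
	close(efd);
	syscall(__NR_io_destroy, ctx);
	return 0;
}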
crossvul-cpp_data_bad_2144_8
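/*
 * Illustrative libmagic consumer sketch -- not part of the apprentice.c
 * source that follows.  The apprentice code below is what runs behind
 * magic_load(): it parses or maps the magic database into the in-memory
 * mlist that magic_file() later matches against (see apprentice_1(),
 * apprentice_load() and apprentice_map() below).  "sample.bin" is a
 * hypothetical input path; build with -lmagic.
 */
#include <stdio.h>
#include <magic.h>

int main(void)
{
	magic_t ms;
	const char *desc;

	ms = magic_open(MAGIC_NONE);		/* or MAGIC_MIME_TYPE, etc. */
	if (ms == NULL) {
		fprintf(stderr, "magic_open failed\n");
		return 1;
	}

	/* NULL selects the default magic database; this call is where the
	 * apprentice machinery below gets invoked */
	if (magic_load(ms, NULL) != 0) {
		fprintf(stderr, "magic_load: %s\n", magic_error(ms));
		magic_close(ms);
		return 1;
	}

	desc = magic_file(ms, "sample.bin");	/* hypothetical path */
	if (desc == NULL)
		fprintf(stderr, "magic_file: %s\n", magic_error(ms));
	else
		printf("sample.bin: %s\n", desc);

	magic_close(ms);
	return 0;
}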
/* * Copyright (c) Ian F. Darwin 1986-1995. * Software written by Ian F. Darwin and others; * maintained 1995-present by Christos Zoulas and others. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * apprentice - make one pass through /etc/magic, learning its secrets. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: apprentice.c,v 1.209 2014/05/13 16:42:17 christos Exp $") #endif /* lint */ #include "magic.h" #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_STDDEF_H #include <stddef.h> #endif #include <string.h> #include <assert.h> #include <ctype.h> #include <fcntl.h> #ifdef QUICK #include <sys/mman.h> #endif #include <dirent.h> #if defined(HAVE_LIMITS_H) #include <limits.h> #endif #ifndef SSIZE_MAX #define MAXMAGIC_SIZE ((ssize_t)0x7fffffff) #else #define MAXMAGIC_SIZE SSIZE_MAX #endif #define EATAB {while (isascii((unsigned char) *l) && \ isspace((unsigned char) *l)) ++l;} #define LOWCASE(l) (isupper((unsigned char) (l)) ? \ tolower((unsigned char) (l)) : (l)) /* * Work around a bug in headers on Digital Unix. 
* At least confirmed for: OSF1 V4.0 878 */ #if defined(__osf__) && defined(__DECC) #ifdef MAP_FAILED #undef MAP_FAILED #endif #endif #ifndef MAP_FAILED #define MAP_FAILED (void *) -1 #endif #ifndef MAP_FILE #define MAP_FILE 0 #endif #define ALLOC_CHUNK (size_t)10 #define ALLOC_INCR (size_t)200 struct magic_entry { struct magic *mp; uint32_t cont_count; uint32_t max_count; }; struct magic_entry_set { struct magic_entry *me; uint32_t count; uint32_t max; }; struct magic_map { void *p; size_t len; struct magic *magic[MAGIC_SETS]; uint32_t nmagic[MAGIC_SETS]; }; int file_formats[FILE_NAMES_SIZE]; const size_t file_nformats = FILE_NAMES_SIZE; const char *file_names[FILE_NAMES_SIZE]; const size_t file_nnames = FILE_NAMES_SIZE; private int getvalue(struct magic_set *ms, struct magic *, const char **, int); private int hextoint(int); private const char *getstr(struct magic_set *, struct magic *, const char *, int); private int parse(struct magic_set *, struct magic_entry *, const char *, size_t, int); private void eatsize(const char **); private int apprentice_1(struct magic_set *, const char *, int); private size_t apprentice_magic_strength(const struct magic *); private int apprentice_sort(const void *, const void *); private void apprentice_list(struct mlist *, int ); private struct magic_map *apprentice_load(struct magic_set *, const char *, int); private struct mlist *mlist_alloc(void); private void mlist_free(struct mlist *); private void byteswap(struct magic *, uint32_t); private void bs1(struct magic *); private uint16_t swap2(uint16_t); private uint32_t swap4(uint32_t); private uint64_t swap8(uint64_t); private char *mkdbname(struct magic_set *, const char *, int); private struct magic_map *apprentice_map(struct magic_set *, const char *); private void apprentice_unmap(struct magic_map *); private int apprentice_compile(struct magic_set *, struct magic_map *, const char *); private int check_format_type(const char *, int); private int check_format(struct magic_set *, struct magic *); private int get_op(char); private int parse_mime(struct magic_set *, struct magic_entry *, const char *); private int parse_strength(struct magic_set *, struct magic_entry *, const char *); private int parse_apple(struct magic_set *, struct magic_entry *, const char *); private size_t magicsize = sizeof(struct magic); private const char usg_hdr[] = "cont\toffset\ttype\topcode\tmask\tvalue\tdesc"; private struct { const char *name; size_t len; int (*fun)(struct magic_set *, struct magic_entry *, const char *); } bang[] = { #define DECLARE_FIELD(name) { # name, sizeof(# name) - 1, parse_ ## name } DECLARE_FIELD(mime), DECLARE_FIELD(apple), DECLARE_FIELD(strength), #undef DECLARE_FIELD { NULL, 0, NULL } }; #ifdef COMPILE_ONLY int main(int, char *[]); int main(int argc, char *argv[]) { int ret; struct magic_set *ms; char *progname; if ((progname = strrchr(argv[0], '/')) != NULL) progname++; else progname = argv[0]; if (argc != 2) { (void)fprintf(stderr, "Usage: %s file\n", progname); return 1; } if ((ms = magic_open(MAGIC_CHECK)) == NULL) { (void)fprintf(stderr, "%s: %s\n", progname, strerror(errno)); return 1; } ret = magic_compile(ms, argv[1]) == -1 ? 
1 : 0; if (ret == 1) (void)fprintf(stderr, "%s: %s\n", progname, magic_error(ms)); magic_close(ms); return ret; } #endif /* COMPILE_ONLY */ struct type_tbl_s { const char name[16]; const size_t len; const int type; const int format; }; /* * XXX - the actual Single UNIX Specification says that "long" means "long", * as in the C data type, but we treat it as meaning "4-byte integer". * Given that the OS X version of file 5.04 did the same, I guess that passes * the actual test; having "long" be dependent on how big a "long" is on * the machine running "file" is silly. */ static const struct type_tbl_s type_tbl[] = { # define XX(s) s, (sizeof(s) - 1) # define XX_NULL "", 0 { XX("invalid"), FILE_INVALID, FILE_FMT_NONE }, { XX("byte"), FILE_BYTE, FILE_FMT_NUM }, { XX("short"), FILE_SHORT, FILE_FMT_NUM }, { XX("default"), FILE_DEFAULT, FILE_FMT_NONE }, { XX("long"), FILE_LONG, FILE_FMT_NUM }, { XX("string"), FILE_STRING, FILE_FMT_STR }, { XX("date"), FILE_DATE, FILE_FMT_STR }, { XX("beshort"), FILE_BESHORT, FILE_FMT_NUM }, { XX("belong"), FILE_BELONG, FILE_FMT_NUM }, { XX("bedate"), FILE_BEDATE, FILE_FMT_STR }, { XX("leshort"), FILE_LESHORT, FILE_FMT_NUM }, { XX("lelong"), FILE_LELONG, FILE_FMT_NUM }, { XX("ledate"), FILE_LEDATE, FILE_FMT_STR }, { XX("pstring"), FILE_PSTRING, FILE_FMT_STR }, { XX("ldate"), FILE_LDATE, FILE_FMT_STR }, { XX("beldate"), FILE_BELDATE, FILE_FMT_STR }, { XX("leldate"), FILE_LELDATE, FILE_FMT_STR }, { XX("regex"), FILE_REGEX, FILE_FMT_STR }, { XX("bestring16"), FILE_BESTRING16, FILE_FMT_STR }, { XX("lestring16"), FILE_LESTRING16, FILE_FMT_STR }, { XX("search"), FILE_SEARCH, FILE_FMT_STR }, { XX("medate"), FILE_MEDATE, FILE_FMT_STR }, { XX("meldate"), FILE_MELDATE, FILE_FMT_STR }, { XX("melong"), FILE_MELONG, FILE_FMT_NUM }, { XX("quad"), FILE_QUAD, FILE_FMT_QUAD }, { XX("lequad"), FILE_LEQUAD, FILE_FMT_QUAD }, { XX("bequad"), FILE_BEQUAD, FILE_FMT_QUAD }, { XX("qdate"), FILE_QDATE, FILE_FMT_STR }, { XX("leqdate"), FILE_LEQDATE, FILE_FMT_STR }, { XX("beqdate"), FILE_BEQDATE, FILE_FMT_STR }, { XX("qldate"), FILE_QLDATE, FILE_FMT_STR }, { XX("leqldate"), FILE_LEQLDATE, FILE_FMT_STR }, { XX("beqldate"), FILE_BEQLDATE, FILE_FMT_STR }, { XX("float"), FILE_FLOAT, FILE_FMT_FLOAT }, { XX("befloat"), FILE_BEFLOAT, FILE_FMT_FLOAT }, { XX("lefloat"), FILE_LEFLOAT, FILE_FMT_FLOAT }, { XX("double"), FILE_DOUBLE, FILE_FMT_DOUBLE }, { XX("bedouble"), FILE_BEDOUBLE, FILE_FMT_DOUBLE }, { XX("ledouble"), FILE_LEDOUBLE, FILE_FMT_DOUBLE }, { XX("leid3"), FILE_LEID3, FILE_FMT_NUM }, { XX("beid3"), FILE_BEID3, FILE_FMT_NUM }, { XX("indirect"), FILE_INDIRECT, FILE_FMT_NUM }, { XX("qwdate"), FILE_QWDATE, FILE_FMT_STR }, { XX("leqwdate"), FILE_LEQWDATE, FILE_FMT_STR }, { XX("beqwdate"), FILE_BEQWDATE, FILE_FMT_STR }, { XX("name"), FILE_NAME, FILE_FMT_NONE }, { XX("use"), FILE_USE, FILE_FMT_NONE }, { XX("clear"), FILE_CLEAR, FILE_FMT_NONE }, { XX_NULL, FILE_INVALID, FILE_FMT_NONE }, }; /* * These are not types, and cannot be preceded by "u" to make them * unsigned. 
*/ static const struct type_tbl_s special_tbl[] = { { XX("name"), FILE_NAME, FILE_FMT_STR }, { XX("use"), FILE_USE, FILE_FMT_STR }, { XX_NULL, FILE_INVALID, FILE_FMT_NONE }, }; # undef XX # undef XX_NULL private int get_type(const struct type_tbl_s *tbl, const char *l, const char **t) { const struct type_tbl_s *p; for (p = tbl; p->len; p++) { if (strncmp(l, p->name, p->len) == 0) { if (t) *t = l + p->len; break; } } return p->type; } private int get_standard_integer_type(const char *l, const char **t) { int type; if (isalpha((unsigned char)l[1])) { switch (l[1]) { case 'C': /* "dC" and "uC" */ type = FILE_BYTE; break; case 'S': /* "dS" and "uS" */ type = FILE_SHORT; break; case 'I': case 'L': /* * "dI", "dL", "uI", and "uL". * * XXX - the actual Single UNIX Specification says * that "L" means "long", as in the C data type, * but we treat it as meaning "4-byte integer". * Given that the OS X version of file 5.04 did * the same, I guess that passes the actual SUS * validation suite; having "dL" be dependent on * how big a "long" is on the machine running * "file" is silly. */ type = FILE_LONG; break; case 'Q': /* "dQ" and "uQ" */ type = FILE_QUAD; break; default: /* "d{anything else}", "u{anything else}" */ return FILE_INVALID; } l += 2; } else if (isdigit((unsigned char)l[1])) { /* * "d{num}" and "u{num}"; we only support {num} values * of 1, 2, 4, and 8 - the Single UNIX Specification * doesn't say anything about whether arbitrary * values should be supported, but both the Solaris 10 * and OS X Mountain Lion versions of file passed the * Single UNIX Specification validation suite, and * neither of them support values bigger than 8 or * non-power-of-2 values. */ if (isdigit((unsigned char)l[2])) { /* Multi-digit, so > 9 */ return FILE_INVALID; } switch (l[1]) { case '1': type = FILE_BYTE; break; case '2': type = FILE_SHORT; break; case '4': type = FILE_LONG; break; case '8': type = FILE_QUAD; break; default: /* XXX - what about 3, 5, 6, or 7? */ return FILE_INVALID; } l += 2; } else { /* * "d" or "u" by itself. */ type = FILE_LONG; ++l; } if (t) *t = l; return type; } private void init_file_tables(void) { static int done = 0; const struct type_tbl_s *p; if (done) return; done++; for (p = type_tbl; p->len; p++) { assert(p->type < FILE_NAMES_SIZE); file_names[p->type] = p->name; file_formats[p->type] = p->format; } assert(p - type_tbl == FILE_NAMES_SIZE); } private int add_mlist(struct mlist *mlp, struct magic_map *map, size_t idx) { struct mlist *ml; if ((ml = CAST(struct mlist *, malloc(sizeof(*ml)))) == NULL) return -1; ml->map = idx == 0 ? map : NULL; ml->magic = map->magic[idx]; ml->nmagic = map->nmagic[idx]; mlp->prev->next = ml; ml->prev = mlp->prev; ml->next = mlp; mlp->prev = ml; return 0; } /* * Handle one file or directory. 
*/ private int apprentice_1(struct magic_set *ms, const char *fn, int action) { struct mlist *ml; struct magic_map *map; size_t i; if (magicsize != FILE_MAGICSIZE) { file_error(ms, 0, "magic element size %lu != %lu", (unsigned long)sizeof(*map->magic[0]), (unsigned long)FILE_MAGICSIZE); return -1; } if (action == FILE_COMPILE) { map = apprentice_load(ms, fn, action); if (map == NULL) return -1; return apprentice_compile(ms, map, fn); } #ifndef COMPILE_ONLY map = apprentice_map(ms, fn); if (map == NULL) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "using regular magic file `%s'", fn); map = apprentice_load(ms, fn, action); if (map == NULL) return -1; } for (i = 0; i < MAGIC_SETS; i++) { if (add_mlist(ms->mlist[i], map, i) == -1) { file_oomem(ms, sizeof(*ml)); apprentice_unmap(map); return -1; } } if (action == FILE_LIST) { for (i = 0; i < MAGIC_SETS; i++) { printf("Set %zu:\nBinary patterns:\n", i); apprentice_list(ms->mlist[i], BINTEST); printf("Text patterns:\n"); apprentice_list(ms->mlist[i], TEXTTEST); } } return 0; #endif /* COMPILE_ONLY */ } protected void file_ms_free(struct magic_set *ms) { size_t i; if (ms == NULL) return; for (i = 0; i < MAGIC_SETS; i++) mlist_free(ms->mlist[i]); free(ms->o.pbuf); free(ms->o.buf); free(ms->c.li); free(ms); } protected struct magic_set * file_ms_alloc(int flags) { struct magic_set *ms; size_t i, len; if ((ms = CAST(struct magic_set *, calloc((size_t)1, sizeof(struct magic_set)))) == NULL) return NULL; if (magic_setflags(ms, flags) == -1) { errno = EINVAL; goto free; } ms->o.buf = ms->o.pbuf = NULL; len = (ms->c.len = 10) * sizeof(*ms->c.li); if ((ms->c.li = CAST(struct level_info *, malloc(len))) == NULL) goto free; ms->event_flags = 0; ms->error = -1; for (i = 0; i < MAGIC_SETS; i++) ms->mlist[i] = NULL; ms->file = "unknown"; ms->line = 0; return ms; free: free(ms); return NULL; } private void apprentice_unmap(struct magic_map *map) { if (map == NULL) return; if (map->p != NULL) { #ifdef QUICK if (map->len) (void)munmap(map->p, map->len); else #endif free(map->p); } else { uint32_t j; for (j = 0; j < MAGIC_SETS; j++) free(map->magic[j]); } free(map); } private struct mlist * mlist_alloc(void) { struct mlist *mlist; if ((mlist = CAST(struct mlist *, calloc(1, sizeof(*mlist)))) == NULL) { return NULL; } mlist->next = mlist->prev = mlist; return mlist; } private void mlist_free(struct mlist *mlist) { struct mlist *ml; if (mlist == NULL) return; for (ml = mlist->next; ml != mlist;) { struct mlist *next = ml->next; if (ml->map) apprentice_unmap(ml->map); free(ml); ml = next; } free(ml); } /* const char *fn: list of magic files and directories */ protected int file_apprentice(struct magic_set *ms, const char *fn, int action) { char *p, *mfn; int file_err, errs = -1; size_t i; if (ms->mlist[0] != NULL) file_reset(ms); if ((fn = magic_getpath(fn, action)) == NULL) return -1; init_file_tables(); if ((mfn = strdup(fn)) == NULL) { file_oomem(ms, strlen(fn)); return -1; } for (i = 0; i < MAGIC_SETS; i++) { mlist_free(ms->mlist[i]); if ((ms->mlist[i] = mlist_alloc()) == NULL) { file_oomem(ms, sizeof(*ms->mlist[i])); if (i != 0) { --i; do mlist_free(ms->mlist[i]); while (i != 0); } free(mfn); return -1; } } fn = mfn; while (fn) { p = strchr(fn, PATHSEP); if (p) *p++ = '\0'; if (*fn == '\0') break; file_err = apprentice_1(ms, fn, action); errs = MAX(errs, file_err); fn = p; } free(mfn); if (errs == -1) { for (i = 0; i < MAGIC_SETS; i++) { mlist_free(ms->mlist[i]); ms->mlist[i] = NULL; } file_error(ms, 0, "could not find any valid magic files!"); return -1; } 
#if 0 /* * Always leave the database loaded */ if (action == FILE_LOAD) return 0; for (i = 0; i < MAGIC_SETS; i++) { mlist_free(ms->mlist[i]); ms->mlist[i] = NULL; } #endif switch (action) { case FILE_LOAD: case FILE_COMPILE: case FILE_CHECK: case FILE_LIST: return 0; default: file_error(ms, 0, "Invalid action %d", action); return -1; } } /* * Compute the real length of a magic expression, for the purposes * of determining how "strong" a magic expression is (approximating * how specific its matches are): * - magic characters count 0 unless escaped. * - [] expressions count 1 * - {} expressions count 0 * - regular characters or escaped magic characters count 1 * - 0 length expressions count as one */ private size_t nonmagic(const char *str) { const char *p; size_t rv = 0; for (p = str; *p; p++) switch (*p) { case '\\': /* Escaped anything counts 1 */ if (!*++p) p--; rv++; continue; case '?': /* Magic characters count 0 */ case '*': case '.': case '+': case '^': case '$': continue; case '[': /* Bracketed expressions count 1 the ']' */ while (*p && *p != ']') p++; p--; continue; case '{': /* Braced expressions count 0 */ while (*p && *p != '}') p++; if (!*p) p--; continue; default: /* Anything else counts 1 */ rv++; continue; } return rv == 0 ? 1 : rv; /* Return at least 1 */ } /* * Get weight of this magic entry, for sorting purposes. */ private size_t apprentice_magic_strength(const struct magic *m) { #define MULT 10 size_t v, val = 2 * MULT; /* baseline strength */ switch (m->type) { case FILE_DEFAULT: /* make sure this sorts last */ if (m->factor_op != FILE_FACTOR_OP_NONE) abort(); return 0; case FILE_BYTE: val += 1 * MULT; break; case FILE_SHORT: case FILE_LESHORT: case FILE_BESHORT: val += 2 * MULT; break; case FILE_LONG: case FILE_LELONG: case FILE_BELONG: case FILE_MELONG: val += 4 * MULT; break; case FILE_PSTRING: case FILE_STRING: val += m->vallen * MULT; break; case FILE_BESTRING16: case FILE_LESTRING16: val += m->vallen * MULT / 2; break; case FILE_SEARCH: val += m->vallen * MAX(MULT / m->vallen, 1); break; case FILE_REGEX: v = nonmagic(m->value.s); val += v * MAX(MULT / v, 1); break; case FILE_DATE: case FILE_LEDATE: case FILE_BEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_LELDATE: case FILE_BELDATE: case FILE_MELDATE: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: val += 4 * MULT; break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: case FILE_QDATE: case FILE_LEQDATE: case FILE_BEQDATE: case FILE_QLDATE: case FILE_LEQLDATE: case FILE_BEQLDATE: case FILE_QWDATE: case FILE_LEQWDATE: case FILE_BEQWDATE: case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: val += 8 * MULT; break; case FILE_INDIRECT: case FILE_NAME: case FILE_USE: break; default: (void)fprintf(stderr, "Bad type %d\n", m->type); abort(); } switch (m->reln) { case 'x': /* matches anything penalize */ case '!': /* matches almost anything penalize */ val = 0; break; case '=': /* Exact match, prefer */ val += MULT; break; case '>': case '<': /* comparison match reduce strength */ val -= 2 * MULT; break; case '^': case '&': /* masking bits, we could count them too */ val -= MULT; break; default: (void)fprintf(stderr, "Bad relation %c\n", m->reln); abort(); } if (val == 0) /* ensure we only return 0 for FILE_DEFAULT */ val = 1; switch (m->factor_op) { case FILE_FACTOR_OP_NONE: break; case FILE_FACTOR_OP_PLUS: val += m->factor; break; case FILE_FACTOR_OP_MINUS: val -= m->factor; break; case FILE_FACTOR_OP_TIMES: val *= m->factor; break; case FILE_FACTOR_OP_DIV: val /= m->factor; break; default: 
abort(); } /* * Magic entries with no description get a bonus because they depend * on subsequent magic entries to print something. */ if (m->desc[0] == '\0') val++; return val; } /* * Sort callback for sorting entries by "strength" (basically length) */ private int apprentice_sort(const void *a, const void *b) { const struct magic_entry *ma = CAST(const struct magic_entry *, a); const struct magic_entry *mb = CAST(const struct magic_entry *, b); size_t sa = apprentice_magic_strength(ma->mp); size_t sb = apprentice_magic_strength(mb->mp); if (sa == sb) return 0; else if (sa > sb) return -1; else return 1; } /* * Shows sorted patterns list in the order which is used for the matching */ private void apprentice_list(struct mlist *mlist, int mode) { uint32_t magindex = 0; struct mlist *ml; for (ml = mlist->next; ml != mlist; ml = ml->next) { for (magindex = 0; magindex < ml->nmagic; magindex++) { struct magic *m = &ml->magic[magindex]; if ((m->flag & mode) != mode) { /* Skip sub-tests */ while (magindex + 1 < ml->nmagic && ml->magic[magindex + 1].cont_level != 0) ++magindex; continue; /* Skip to next top-level test*/ } /* * Try to iterate over the tree until we find item with * description/mimetype. */ while (magindex + 1 < ml->nmagic && ml->magic[magindex + 1].cont_level != 0 && *ml->magic[magindex].desc == '\0' && *ml->magic[magindex].mimetype == '\0') magindex++; printf("Strength = %3" SIZE_T_FORMAT "u : %s [%s]\n", apprentice_magic_strength(m), ml->magic[magindex].desc, ml->magic[magindex].mimetype); } } } private void set_test_type(struct magic *mstart, struct magic *m) { switch (m->type) { case FILE_BYTE: case FILE_SHORT: case FILE_LONG: case FILE_DATE: case FILE_BESHORT: case FILE_BELONG: case FILE_BEDATE: case FILE_LESHORT: case FILE_LELONG: case FILE_LEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MEDATE: case FILE_MELDATE: case FILE_MELONG: case FILE_QUAD: case FILE_LEQUAD: case FILE_BEQUAD: case FILE_QDATE: case FILE_LEQDATE: case FILE_BEQDATE: case FILE_QLDATE: case FILE_LEQLDATE: case FILE_BEQLDATE: case FILE_QWDATE: case FILE_LEQWDATE: case FILE_BEQWDATE: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: mstart->flag |= BINTEST; break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: /* Allow text overrides */ if (mstart->str_flags & STRING_TEXTTEST) mstart->flag |= TEXTTEST; else mstart->flag |= BINTEST; break; case FILE_REGEX: case FILE_SEARCH: /* Check for override */ if (mstart->str_flags & STRING_BINTEST) mstart->flag |= BINTEST; if (mstart->str_flags & STRING_TEXTTEST) mstart->flag |= TEXTTEST; if (mstart->flag & (TEXTTEST|BINTEST)) break; /* binary test if pattern is not text */ if (file_looks_utf8(m->value.us, (size_t)m->vallen, NULL, NULL) <= 0) mstart->flag |= BINTEST; else mstart->flag |= TEXTTEST; break; case FILE_DEFAULT: /* can't deduce anything; we shouldn't see this at the top level anyway */ break; case FILE_INVALID: default: /* invalid search type, but no need to complain here */ break; } } private int addentry(struct magic_set *ms, struct magic_entry *me, struct magic_entry_set *mset) { size_t i = me->mp->type == FILE_NAME ? 
1 : 0; if (mset[i].count == mset[i].max) { struct magic_entry *mp; mset[i].max += ALLOC_INCR; if ((mp = CAST(struct magic_entry *, realloc(mset[i].me, sizeof(*mp) * mset[i].max))) == NULL) { file_oomem(ms, sizeof(*mp) * mset[i].max); return -1; } (void)memset(&mp[mset[i].count], 0, sizeof(*mp) * ALLOC_INCR); mset[i].me = mp; } mset[i].me[mset[i].count++] = *me; memset(me, 0, sizeof(*me)); return 0; } /* * Load and parse one file. */ private void load_1(struct magic_set *ms, int action, const char *fn, int *errs, struct magic_entry_set *mset) { size_t lineno = 0, llen = 0; char *line = NULL; ssize_t len; struct magic_entry me; FILE *f = fopen(ms->file = fn, "r"); if (f == NULL) { if (errno != ENOENT) file_error(ms, errno, "cannot read magic file `%s'", fn); (*errs)++; return; } memset(&me, 0, sizeof(me)); /* read and parse this file */ for (ms->line = 1; (len = getline(&line, &llen, f)) != -1; ms->line++) { if (len == 0) /* null line, garbage, etc */ continue; if (line[len - 1] == '\n') { lineno++; line[len - 1] = '\0'; /* delete newline */ } switch (line[0]) { case '\0': /* empty, do not parse */ case '#': /* comment, do not parse */ continue; case '!': if (line[1] == ':') { size_t i; for (i = 0; bang[i].name != NULL; i++) { if ((size_t)(len - 2) > bang[i].len && memcmp(bang[i].name, line + 2, bang[i].len) == 0) break; } if (bang[i].name == NULL) { file_error(ms, 0, "Unknown !: entry `%s'", line); (*errs)++; continue; } if (me.mp == NULL) { file_error(ms, 0, "No current entry for :!%s type", bang[i].name); (*errs)++; continue; } if ((*bang[i].fun)(ms, &me, line + bang[i].len + 2) != 0) { (*errs)++; continue; } continue; } /*FALLTHROUGH*/ default: again: switch (parse(ms, &me, line, lineno, action)) { case 0: continue; case 1: (void)addentry(ms, &me, mset); goto again; default: (*errs)++; break; } } } if (me.mp) (void)addentry(ms, &me, mset); free(line); (void)fclose(f); } /* * parse a file or directory of files * const char *fn: name of magic file or directory */ private int cmpstrp(const void *p1, const void *p2) { return strcmp(*(char *const *)p1, *(char *const *)p2); } private uint32_t set_text_binary(struct magic_set *ms, struct magic_entry *me, uint32_t nme, uint32_t starttest) { static const char text[] = "text"; static const char binary[] = "binary"; static const size_t len = sizeof(text); uint32_t i = starttest; do { set_test_type(me[starttest].mp, me[i].mp); if ((ms->flags & MAGIC_DEBUG) == 0) continue; (void)fprintf(stderr, "%s%s%s: %s\n", me[i].mp->mimetype, me[i].mp->mimetype[0] == '\0' ? "" : "; ", me[i].mp->desc[0] ? me[i].mp->desc : "(no description)", me[i].mp->flag & BINTEST ? binary : text); if (me[i].mp->flag & BINTEST) { char *p = strstr(me[i].mp->desc, text); if (p && (p == me[i].mp->desc || isspace((unsigned char)p[-1])) && (p + len - me[i].mp->desc == MAXstring || (p[len] == '\0' || isspace((unsigned char)p[len])))) (void)fprintf(stderr, "*** Possible " "binary test for text type\n"); } } while (++i < nme && me[i].mp->cont_level != 0); return i; } private void set_last_default(struct magic_set *ms, struct magic_entry *me, uint32_t nme) { uint32_t i; for (i = 0; i < nme; i++) { if (me[i].mp->cont_level == 0 && me[i].mp->type == FILE_DEFAULT) { while (++i < nme) if (me[i].mp->cont_level == 0) break; if (i != nme) { /* XXX - Ugh! 
*/ ms->line = me[i].mp->lineno; file_magwarn(ms, "level 0 \"default\" did not sort last"); } return; } } } private int coalesce_entries(struct magic_set *ms, struct magic_entry *me, uint32_t nme, struct magic **ma, uint32_t *nma) { uint32_t i, mentrycount = 0; size_t slen; for (i = 0; i < nme; i++) mentrycount += me[i].cont_count; slen = sizeof(**ma) * mentrycount; if ((*ma = CAST(struct magic *, malloc(slen))) == NULL) { file_oomem(ms, slen); return -1; } mentrycount = 0; for (i = 0; i < nme; i++) { (void)memcpy(*ma + mentrycount, me[i].mp, me[i].cont_count * sizeof(**ma)); mentrycount += me[i].cont_count; } *nma = mentrycount; return 0; } private void magic_entry_free(struct magic_entry *me, uint32_t nme) { uint32_t i; if (me == NULL) return; for (i = 0; i < nme; i++) free(me[i].mp); free(me); } private struct magic_map * apprentice_load(struct magic_set *ms, const char *fn, int action) { int errs = 0; uint32_t i, j; size_t files = 0, maxfiles = 0; char **filearr = NULL, *mfn; struct stat st; struct magic_map *map; struct magic_entry_set mset[MAGIC_SETS]; DIR *dir; struct dirent *d; memset(mset, 0, sizeof(mset)); ms->flags |= MAGIC_CHECK; /* Enable checks for parsed files */ if ((map = CAST(struct magic_map *, calloc(1, sizeof(*map)))) == NULL) { file_oomem(ms, sizeof(*map)); return NULL; } /* print silly verbose header for USG compat. */ if (action == FILE_CHECK) (void)fprintf(stderr, "%s\n", usg_hdr); /* load directory or file */ if (stat(fn, &st) == 0 && S_ISDIR(st.st_mode)) { dir = opendir(fn); if (!dir) { errs++; goto out; } while ((d = readdir(dir)) != NULL) { if (asprintf(&mfn, "%s/%s", fn, d->d_name) < 0) { file_oomem(ms, strlen(fn) + strlen(d->d_name) + 2); errs++; closedir(dir); goto out; } if (stat(mfn, &st) == -1 || !S_ISREG(st.st_mode)) { free(mfn); continue; } if (files >= maxfiles) { size_t mlen; maxfiles = (maxfiles + 1) * 2; mlen = maxfiles * sizeof(*filearr); if ((filearr = CAST(char **, realloc(filearr, mlen))) == NULL) { file_oomem(ms, mlen); free(mfn); closedir(dir); errs++; goto out; } } filearr[files++] = mfn; } closedir(dir); qsort(filearr, files, sizeof(*filearr), cmpstrp); for (i = 0; i < files; i++) { load_1(ms, action, filearr[i], &errs, mset); free(filearr[i]); } free(filearr); } else load_1(ms, action, fn, &errs, mset); if (errs) goto out; for (j = 0; j < MAGIC_SETS; j++) { /* Set types of tests */ for (i = 0; i < mset[j].count; ) { if (mset[j].me[i].mp->cont_level != 0) { i++; continue; } i = set_text_binary(ms, mset[j].me, mset[j].count, i); } qsort(mset[j].me, mset[j].count, sizeof(*mset[j].me), apprentice_sort); /* * Make sure that any level 0 "default" line is last * (if one exists). */ set_last_default(ms, mset[j].me, mset[j].count); /* coalesce per file arrays into a single one */ if (coalesce_entries(ms, mset[j].me, mset[j].count, &map->magic[j], &map->nmagic[j]) == -1) { errs++; goto out; } } out: for (j = 0; j < MAGIC_SETS; j++) magic_entry_free(mset[j].me, mset[j].count); if (errs) { apprentice_unmap(map); return NULL; } return map; } /* * extend the sign bit if the comparison is to be signed */ protected uint64_t file_signextend(struct magic_set *ms, struct magic *m, uint64_t v) { if (!(m->flag & UNSIGNED)) { switch(m->type) { /* * Do not remove the casts below. They are * vital. When later compared with the data, * the sign extension must have happened. 
*/ case FILE_BYTE: v = (char) v; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = (short) v; break; case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: v = (int32_t) v; break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: v = (int64_t) v; break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: case FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_INDIRECT: case FILE_NAME: case FILE_USE: case FILE_CLEAR: break; default: if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "cannot happen: m->type=%d\n", m->type); return ~0U; } } return v; } private int string_modifier_check(struct magic_set *ms, struct magic *m) { if ((ms->flags & MAGIC_CHECK) == 0) return 0; if (m->type != FILE_PSTRING && (m->str_flags & PSTRING_LEN) != 0) { file_magwarn(ms, "'/BHhLl' modifiers are only allowed for pascal strings\n"); return -1; } switch (m->type) { case FILE_BESTRING16: case FILE_LESTRING16: if (m->str_flags != 0) { file_magwarn(ms, "no modifiers allowed for 16-bit strings\n"); return -1; } break; case FILE_STRING: case FILE_PSTRING: if ((m->str_flags & REGEX_OFFSET_START) != 0) { file_magwarn(ms, "'/%c' only allowed on regex and search\n", CHAR_REGEX_OFFSET_START); return -1; } break; case FILE_SEARCH: if (m->str_range == 0) { file_magwarn(ms, "missing range; defaulting to %d\n", STRING_DEFAULT_RANGE); m->str_range = STRING_DEFAULT_RANGE; return -1; } break; case FILE_REGEX: if ((m->str_flags & STRING_COMPACT_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_WHITESPACE); return -1; } if ((m->str_flags & STRING_COMPACT_OPTIONAL_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_OPTIONAL_WHITESPACE); return -1; } break; default: file_magwarn(ms, "coding error: m->type=%d\n", m->type); return -1; } return 0; } private int get_op(char c) { switch (c) { case '&': return FILE_OPAND; case '|': return FILE_OPOR; case '^': return FILE_OPXOR; case '+': return FILE_OPADD; case '-': return FILE_OPMINUS; case '*': return FILE_OPMULTIPLY; case '/': return FILE_OPDIVIDE; case '%': return FILE_OPMODULO; default: return -1; } } #ifdef ENABLE_CONDITIONALS private int get_cond(const char *l, const char **t) { static const struct cond_tbl_s { char name[8]; size_t len; int cond; } cond_tbl[] = { { "if", 2, COND_IF }, { "elif", 4, COND_ELIF }, { "else", 4, COND_ELSE }, { "", 0, COND_NONE }, }; const struct cond_tbl_s *p; for (p = cond_tbl; p->len; p++) { if (strncmp(l, p->name, p->len) == 0 && isspace((unsigned char)l[p->len])) { if (t) *t = l + p->len; break; } } return p->cond; } private int check_cond(struct magic_set *ms, int cond, uint32_t cont_level) { int last_cond; last_cond = ms->c.li[cont_level].last_cond; switch (cond) { case COND_IF: if (last_cond != COND_NONE && last_cond != COND_ELIF) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "syntax error: `if'"); return -1; } last_cond = COND_IF; break; case COND_ELIF: if (last_cond != COND_IF && last_cond != COND_ELIF) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "syntax error: `elif'"); return -1; } last_cond = 
COND_ELIF; break; case COND_ELSE: if (last_cond != COND_IF && last_cond != COND_ELIF) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "syntax error: `else'"); return -1; } last_cond = COND_NONE; break; case COND_NONE: last_cond = COND_NONE; break; } ms->c.li[cont_level].last_cond = last_cond; return 0; } #endif /* ENABLE_CONDITIONALS */ /* * parse one line from magic file, put into magic[index++] if valid */ private int parse(struct magic_set *ms, struct magic_entry *me, const char *line, size_t lineno, int action) { #ifdef ENABLE_CONDITIONALS static uint32_t last_cont_level = 0; #endif size_t i; struct magic *m; const char *l = line; char *t; int op; uint32_t cont_level; int32_t diff; cont_level = 0; /* * Parse the offset. */ while (*l == '>') { ++l; /* step over */ cont_level++; } #ifdef ENABLE_CONDITIONALS if (cont_level == 0 || cont_level > last_cont_level) if (file_check_mem(ms, cont_level) == -1) return -1; last_cont_level = cont_level; #endif if (cont_level != 0) { if (me->mp == NULL) { file_magerror(ms, "No current entry for continuation"); return -1; } if (me->cont_count == 0) { file_magerror(ms, "Continuations present with 0 count"); return -1; } m = &me->mp[me->cont_count - 1]; diff = (int32_t)cont_level - (int32_t)m->cont_level; if (diff > 1) file_magwarn(ms, "New continuation level %u is more " "than one larger than current level %u", cont_level, m->cont_level); if (me->cont_count == me->max_count) { struct magic *nm; size_t cnt = me->max_count + ALLOC_CHUNK; if ((nm = CAST(struct magic *, realloc(me->mp, sizeof(*nm) * cnt))) == NULL) { file_oomem(ms, sizeof(*nm) * cnt); return -1; } me->mp = m = nm; me->max_count = CAST(uint32_t, cnt); } m = &me->mp[me->cont_count++]; (void)memset(m, 0, sizeof(*m)); m->cont_level = cont_level; } else { static const size_t len = sizeof(*m) * ALLOC_CHUNK; if (me->mp != NULL) return 1; if ((m = CAST(struct magic *, malloc(len))) == NULL) { file_oomem(ms, len); return -1; } me->mp = m; me->max_count = ALLOC_CHUNK; (void)memset(m, 0, sizeof(*m)); m->factor_op = FILE_FACTOR_OP_NONE; m->cont_level = 0; me->cont_count = 1; } m->lineno = CAST(uint32_t, lineno); if (*l == '&') { /* m->cont_level == 0 checked below. */ ++l; /* step over */ m->flag |= OFFADD; } if (*l == '(') { ++l; /* step over */ m->flag |= INDIR; if (m->flag & OFFADD) m->flag = (m->flag & ~OFFADD) | INDIROFFADD; if (*l == '&') { /* m->cont_level == 0 checked below */ ++l; /* step over */ m->flag |= OFFADD; } } /* Indirect offsets are not valid at level 0. 
*/ if (m->cont_level == 0 && (m->flag & (OFFADD | INDIROFFADD))) if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "relative offset at level 0"); /* get offset, then skip over it */ m->offset = (uint32_t)strtoul(l, &t, 0); if (l == t) if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "offset `%s' invalid", l); l = t; if (m->flag & INDIR) { m->in_type = FILE_LONG; m->in_offset = 0; /* * read [.lbs][+-]nnnnn) */ if (*l == '.') { l++; switch (*l) { case 'l': m->in_type = FILE_LELONG; break; case 'L': m->in_type = FILE_BELONG; break; case 'm': m->in_type = FILE_MELONG; break; case 'h': case 's': m->in_type = FILE_LESHORT; break; case 'H': case 'S': m->in_type = FILE_BESHORT; break; case 'c': case 'b': case 'C': case 'B': m->in_type = FILE_BYTE; break; case 'e': case 'f': case 'g': m->in_type = FILE_LEDOUBLE; break; case 'E': case 'F': case 'G': m->in_type = FILE_BEDOUBLE; break; case 'i': m->in_type = FILE_LEID3; break; case 'I': m->in_type = FILE_BEID3; break; default: if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "indirect offset type `%c' invalid", *l); break; } l++; } m->in_op = 0; if (*l == '~') { m->in_op |= FILE_OPINVERSE; l++; } if ((op = get_op(*l)) != -1) { m->in_op |= op; l++; } if (*l == '(') { m->in_op |= FILE_OPINDIRECT; l++; } if (isdigit((unsigned char)*l) || *l == '-') { m->in_offset = (int32_t)strtol(l, &t, 0); if (l == t) if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "in_offset `%s' invalid", l); l = t; } if (*l++ != ')' || ((m->in_op & FILE_OPINDIRECT) && *l++ != ')')) if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "missing ')' in indirect offset"); } EATAB; #ifdef ENABLE_CONDITIONALS m->cond = get_cond(l, &l); if (check_cond(ms, m->cond, cont_level) == -1) return -1; EATAB; #endif /* * Parse the type. */ if (*l == 'u') { /* * Try it as a keyword type prefixed by "u"; match what * follows the "u". If that fails, try it as an SUS * integer type. */ m->type = get_type(type_tbl, l + 1, &l); if (m->type == FILE_INVALID) { /* * Not a keyword type; parse it as an SUS type, * 'u' possibly followed by a number or C/S/L. */ m->type = get_standard_integer_type(l, &l); } /* It's unsigned. */ if (m->type != FILE_INVALID) m->flag |= UNSIGNED; } else { /* * Try it as a keyword type. If that fails, try it as * an SUS integer type if it begins with "d" or as an * SUS string type if it begins with "s". In any case, * it's not unsigned. */ m->type = get_type(type_tbl, l, &l); if (m->type == FILE_INVALID) { /* * Not a keyword type; parse it as an SUS type, * either 'd' possibly followed by a number or * C/S/L, or just 's'. */ if (*l == 'd') m->type = get_standard_integer_type(l, &l); else if (*l == 's' && !isalpha((unsigned char)l[1])) { m->type = FILE_STRING; ++l; } } } if (m->type == FILE_INVALID) { /* Not found - try it as a special keyword. */ m->type = get_type(special_tbl, l, &l); } if (m->type == FILE_INVALID) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "type `%s' invalid", l); return -1; } /* New-style anding: "0 byte&0x80 =0x80 dynamically linked" */ /* New and improved: ~ & | ^ + - * / % -- exciting, isn't it? */ m->mask_op = 0; if (*l == '~') { if (!IS_STRING(m->type)) m->mask_op |= FILE_OPINVERSE; else if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "'~' invalid for string types"); ++l; } m->str_range = 0; m->str_flags = m->type == FILE_PSTRING ? 
PSTRING_1_LE : 0; if ((op = get_op(*l)) != -1) { if (!IS_STRING(m->type)) { uint64_t val; ++l; m->mask_op |= op; val = (uint64_t)strtoull(l, &t, 0); l = t; m->num_mask = file_signextend(ms, m, val); eatsize(&l); } else if (op == FILE_OPDIVIDE) { int have_range = 0; while (!isspace((unsigned char)*++l)) { switch (*l) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (have_range && (ms->flags & MAGIC_CHECK)) file_magwarn(ms, "multiple ranges"); have_range = 1; m->str_range = CAST(uint32_t, strtoul(l, &t, 0)); if (m->str_range == 0) file_magwarn(ms, "zero range"); l = t - 1; break; case CHAR_COMPACT_WHITESPACE: m->str_flags |= STRING_COMPACT_WHITESPACE; break; case CHAR_COMPACT_OPTIONAL_WHITESPACE: m->str_flags |= STRING_COMPACT_OPTIONAL_WHITESPACE; break; case CHAR_IGNORE_LOWERCASE: m->str_flags |= STRING_IGNORE_LOWERCASE; break; case CHAR_IGNORE_UPPERCASE: m->str_flags |= STRING_IGNORE_UPPERCASE; break; case CHAR_REGEX_OFFSET_START: m->str_flags |= REGEX_OFFSET_START; break; case CHAR_BINTEST: m->str_flags |= STRING_BINTEST; break; case CHAR_TEXTTEST: m->str_flags |= STRING_TEXTTEST; break; case CHAR_TRIM: m->str_flags |= STRING_TRIM; break; case CHAR_PSTRING_1_LE: if (m->type != FILE_PSTRING) goto bad; m->str_flags = (m->str_flags & ~PSTRING_LEN) | PSTRING_1_LE; break; case CHAR_PSTRING_2_BE: if (m->type != FILE_PSTRING) goto bad; m->str_flags = (m->str_flags & ~PSTRING_LEN) | PSTRING_2_BE; break; case CHAR_PSTRING_2_LE: if (m->type != FILE_PSTRING) goto bad; m->str_flags = (m->str_flags & ~PSTRING_LEN) | PSTRING_2_LE; break; case CHAR_PSTRING_4_BE: if (m->type != FILE_PSTRING) goto bad; m->str_flags = (m->str_flags & ~PSTRING_LEN) | PSTRING_4_BE; break; case CHAR_PSTRING_4_LE: if (m->type != FILE_PSTRING) goto bad; m->str_flags = (m->str_flags & ~PSTRING_LEN) | PSTRING_4_LE; break; case CHAR_PSTRING_LENGTH_INCLUDES_ITSELF: if (m->type != FILE_PSTRING) goto bad; m->str_flags |= PSTRING_LENGTH_INCLUDES_ITSELF; break; default: bad: if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "string extension `%c' " "invalid", *l); return -1; } /* allow multiple '/' for readability */ if (l[1] == '/' && !isspace((unsigned char)l[2])) l++; } if (string_modifier_check(ms, m) == -1) return -1; } else { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "invalid string op: %c", *t); return -1; } } /* * We used to set mask to all 1's here, instead let's just not do * anything if mask = 0 (unless you have a better idea) */ EATAB; switch (*l) { case '>': case '<': m->reln = *l; ++l; if (*l == '=') { if (ms->flags & MAGIC_CHECK) { file_magwarn(ms, "%c= not supported", m->reln); return -1; } ++l; } break; /* Old-style anding: "0 byte &0x80 dynamically linked" */ case '&': case '^': case '=': m->reln = *l; ++l; if (*l == '=') { /* HP compat: ignore &= etc. */ ++l; } break; case '!': m->reln = *l; ++l; break; default: m->reln = '='; /* the default relation */ if (*l == 'x' && ((isascii((unsigned char)l[1]) && isspace((unsigned char)l[1])) || !l[1])) { m->reln = *l; ++l; } break; } /* * Grab the value part, except for an 'x' reln. */ if (m->reln != 'x' && getvalue(ms, m, &l, action)) return -1; /* * TODO finish this macro and start using it! 
* #define offsetcheck {if (offset > HOWMANY-1) * magwarn("offset too big"); } */ /* * Now get last part - the description */ EATAB; if (l[0] == '\b') { ++l; m->flag |= NOSPACE; } else if ((l[0] == '\\') && (l[1] == 'b')) { ++l; ++l; m->flag |= NOSPACE; } for (i = 0; (m->desc[i++] = *l++) != '\0' && i < sizeof(m->desc); ) continue; if (i == sizeof(m->desc)) { m->desc[sizeof(m->desc) - 1] = '\0'; if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "description `%s' truncated", m->desc); } /* * We only do this check while compiling, or if any of the magic * files were not compiled. */ if (ms->flags & MAGIC_CHECK) { if (check_format(ms, m) == -1) return -1; } #ifndef COMPILE_ONLY if (action == FILE_CHECK) { file_mdump(m); } #endif m->mimetype[0] = '\0'; /* initialise MIME type to none */ return 0; } /* * parse a STRENGTH annotation line from magic file, put into magic[index - 1] * if valid */ private int parse_strength(struct magic_set *ms, struct magic_entry *me, const char *line) { const char *l = line; char *el; unsigned long factor; struct magic *m = &me->mp[0]; if (m->factor_op != FILE_FACTOR_OP_NONE) { file_magwarn(ms, "Current entry already has a strength type: %c %d", m->factor_op, m->factor); return -1; } if (m->type == FILE_NAME) { file_magwarn(ms, "%s: Strength setting is not supported in " "\"name\" magic entries", m->value.s); return -1; } EATAB; switch (*l) { case FILE_FACTOR_OP_NONE: case FILE_FACTOR_OP_PLUS: case FILE_FACTOR_OP_MINUS: case FILE_FACTOR_OP_TIMES: case FILE_FACTOR_OP_DIV: m->factor_op = *l++; break; default: file_magwarn(ms, "Unknown factor op `%c'", *l); return -1; } EATAB; factor = strtoul(l, &el, 0); if (factor > 255) { file_magwarn(ms, "Too large factor `%lu'", factor); goto out; } if (*el && !isspace((unsigned char)*el)) { file_magwarn(ms, "Bad factor `%s'", l); goto out; } m->factor = (uint8_t)factor; if (m->factor == 0 && m->factor_op == FILE_FACTOR_OP_DIV) { file_magwarn(ms, "Cannot have factor op `%c' and factor %u", m->factor_op, m->factor); goto out; } return 0; out: m->factor_op = FILE_FACTOR_OP_NONE; m->factor = 0; return -1; } private int parse_extra(struct magic_set *ms, struct magic_entry *me, const char *line, off_t off, size_t len, const char *name, int nt) { size_t i; const char *l = line; struct magic *m = &me->mp[me->cont_count == 0 ? 0 : me->cont_count - 1]; char *buf = (char *)m + off; if (buf[0] != '\0') { len = nt ? 
strlen(buf) : len; file_magwarn(ms, "Current entry already has a %s type " "`%.*s', new type `%s'", name, (int)len, buf, l); return -1; } if (*m->desc == '\0') { file_magwarn(ms, "Current entry does not yet have a " "description for adding a %s type", name); return -1; } EATAB; for (i = 0; *l && ((isascii((unsigned char)*l) && isalnum((unsigned char)*l)) || strchr("-+/.", *l)) && i < len; buf[i++] = *l++) continue; if (i == len && *l) { if (nt) buf[len - 1] = '\0'; if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "%s type `%s' truncated %" SIZE_T_FORMAT "u", name, line, i); } else { if (nt) buf[i] = '\0'; } if (i > 0) return 0; else return -1; } /* * Parse an Apple CREATOR/TYPE annotation from magic file and put it into * magic[index - 1] */ private int parse_apple(struct magic_set *ms, struct magic_entry *me, const char *line) { struct magic *m = &me->mp[0]; return parse_extra(ms, me, line, offsetof(struct magic, apple), sizeof(m->apple), "APPLE", 0); } /* * parse a MIME annotation line from magic file, put into magic[index - 1] * if valid */ private int parse_mime(struct magic_set *ms, struct magic_entry *me, const char *line) { struct magic *m = &me->mp[0]; return parse_extra(ms, me, line, offsetof(struct magic, mimetype), sizeof(m->mimetype), "MIME", 1); } private int check_format_type(const char *ptr, int type) { int quad = 0, h; if (*ptr == '\0') { /* Missing format string; bad */ return -1; } switch (file_formats[type]) { case FILE_FMT_QUAD: quad = 1; /*FALLTHROUGH*/ case FILE_FMT_NUM: if (quad == 0) { switch (type) { case FILE_BYTE: h = 2; break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: h = 1; break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: case FILE_LEID3: case FILE_BEID3: case FILE_INDIRECT: h = 0; break; default: abort(); } } else h = 0; if (*ptr == '-') ptr++; if (*ptr == '.') ptr++; while (isdigit((unsigned char)*ptr)) ptr++; if (*ptr == '.') ptr++; while (isdigit((unsigned char)*ptr)) ptr++; if (quad) { if (*ptr++ != 'l') return -1; if (*ptr++ != 'l') return -1; } switch (*ptr++) { #ifdef STRICT_FORMAT /* "long" formats are int formats for us */ /* so don't accept the 'l' modifier */ case 'l': switch (*ptr++) { case 'i': case 'd': case 'u': case 'o': case 'x': case 'X': return h != 0 ? -1 : 0; default: return -1; } /* * Don't accept h and hh modifiers. They make writing * magic entries more complicated, for very little benefit */ case 'h': if (h-- <= 0) return -1; switch (*ptr++) { case 'h': if (h-- <= 0) return -1; switch (*ptr++) { case 'i': case 'd': case 'u': case 'o': case 'x': case 'X': return 0; default: return -1; } case 'i': case 'd': case 'u': case 'o': case 'x': case 'X': return h != 0 ? -1 : 0; default: return -1; } #endif case 'c': return h != 2 ? -1 : 0; case 'i': case 'd': case 'u': case 'o': case 'x': case 'X': #ifdef STRICT_FORMAT return h != 0 ? 
-1 : 0; #else return 0; #endif default: return -1; } case FILE_FMT_FLOAT: case FILE_FMT_DOUBLE: if (*ptr == '-') ptr++; if (*ptr == '.') ptr++; while (isdigit((unsigned char)*ptr)) ptr++; if (*ptr == '.') ptr++; while (isdigit((unsigned char)*ptr)) ptr++; switch (*ptr++) { case 'e': case 'E': case 'f': case 'F': case 'g': case 'G': return 0; default: return -1; } case FILE_FMT_STR: if (*ptr == '-') ptr++; while (isdigit((unsigned char )*ptr)) ptr++; if (*ptr == '.') { ptr++; while (isdigit((unsigned char )*ptr)) ptr++; } switch (*ptr++) { case 's': return 0; default: return -1; } default: /* internal error */ abort(); } /*NOTREACHED*/ return -1; } /* * Check that the optional printf format in description matches * the type of the magic. */ private int check_format(struct magic_set *ms, struct magic *m) { char *ptr; for (ptr = m->desc; *ptr; ptr++) if (*ptr == '%') break; if (*ptr == '\0') { /* No format string; ok */ return 1; } assert(file_nformats == file_nnames); if (m->type >= file_nformats) { file_magwarn(ms, "Internal error inconsistency between " "m->type and format strings"); return -1; } if (file_formats[m->type] == FILE_FMT_NONE) { file_magwarn(ms, "No format string for `%s' with description " "`%s'", m->desc, file_names[m->type]); return -1; } ptr++; if (check_format_type(ptr, m->type) == -1) { /* * TODO: this error message is unhelpful if the format * string is not one character long */ file_magwarn(ms, "Printf format `%c' is not valid for type " "`%s' in description `%s'", *ptr ? *ptr : '?', file_names[m->type], m->desc); return -1; } for (; *ptr; ptr++) { if (*ptr == '%') { file_magwarn(ms, "Too many format strings (should have at most one) " "for `%s' with description `%s'", file_names[m->type], m->desc); return -1; } } return 0; } /* * Read a numeric value from a pointer, into the value union of a magic * pointer, according to the magic type. Update the string pointer to point * just after the number read. Return 0 for success, non-zero for failure. */ private int getvalue(struct magic_set *ms, struct magic *m, const char **p, int action) { switch (m->type) { case FILE_BESTRING16: case FILE_LESTRING16: case FILE_STRING: case FILE_PSTRING: case FILE_REGEX: case FILE_SEARCH: case FILE_NAME: case FILE_USE: *p = getstr(ms, m, *p, action == FILE_COMPILE); if (*p == NULL) { if (ms->flags & MAGIC_CHECK) file_magwarn(ms, "cannot get string from `%s'", m->value.s); return -1; } if (m->type == FILE_REGEX) { file_regex_t rx; int rc = file_regcomp(&rx, m->value.s, REG_EXTENDED); if (rc) { if (ms->flags & MAGIC_CHECK) file_regerror(&rx, rc, ms); } file_regfree(&rx); return rc ? -1 : 0; } return 0; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: if (m->reln != 'x') { char *ep; #ifdef HAVE_STRTOF m->value.f = strtof(*p, &ep); #else m->value.f = (float)strtod(*p, &ep); #endif *p = ep; } return 0; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: if (m->reln != 'x') { char *ep; m->value.d = strtod(*p, &ep); *p = ep; } return 0; default: if (m->reln != 'x') { char *ep; m->value.q = file_signextend(ms, m, (uint64_t)strtoull(*p, &ep, 0)); *p = ep; eatsize(p); } return 0; } } /* * Convert a string containing C character escapes. Stop at an unescaped * space or tab. * Copy the converted version to "m->value.s", and the length in m->vallen. * Return updated scan pointer as function result. Warn if set. 
*/ private const char * getstr(struct magic_set *ms, struct magic *m, const char *s, int warn) { const char *origs = s; char *p = m->value.s; size_t plen = sizeof(m->value.s); char *origp = p; char *pmax = p + plen - 1; int c; int val; while ((c = *s++) != '\0') { if (isspace((unsigned char) c)) break; if (p >= pmax) { file_error(ms, 0, "string too long: `%s'", origs); return NULL; } if (c == '\\') { switch(c = *s++) { case '\0': if (warn) file_magwarn(ms, "incomplete escape"); goto out; case '\t': if (warn) { file_magwarn(ms, "escaped tab found, use \\t instead"); warn = 0; /* already did */ } /*FALLTHROUGH*/ default: if (warn) { if (isprint((unsigned char)c)) { /* Allow escaping of * ``relations'' */ if (strchr("<>&^=!", c) == NULL && (m->type != FILE_REGEX || strchr("[]().*?^$|{}", c) == NULL)) { file_magwarn(ms, "no " "need to escape " "`%c'", c); } } else { file_magwarn(ms, "unknown escape sequence: " "\\%03o", c); } } /*FALLTHROUGH*/ /* space, perhaps force people to use \040? */ case ' ': #if 0 /* * Other things people escape, but shouldn't need to, * so we disallow them */ case '\'': case '"': case '?': #endif /* Relations */ case '>': case '<': case '&': case '^': case '=': case '!': /* and baskslash itself */ case '\\': *p++ = (char) c; break; case 'a': *p++ = '\a'; break; case 'b': *p++ = '\b'; break; case 'f': *p++ = '\f'; break; case 'n': *p++ = '\n'; break; case 'r': *p++ = '\r'; break; case 't': *p++ = '\t'; break; case 'v': *p++ = '\v'; break; /* \ and up to 3 octal digits */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': val = c - '0'; c = *s++; /* try for 2 */ if (c >= '0' && c <= '7') { val = (val << 3) | (c - '0'); c = *s++; /* try for 3 */ if (c >= '0' && c <= '7') val = (val << 3) | (c-'0'); else --s; } else --s; *p++ = (char)val; break; /* \x and up to 2 hex digits */ case 'x': val = 'x'; /* Default if no digits */ c = hextoint(*s++); /* Get next char */ if (c >= 0) { val = c; c = hextoint(*s++); if (c >= 0) val = (val << 4) + c; else --s; } else --s; *p++ = (char)val; break; } } else *p++ = (char)c; } out: *p = '\0'; m->vallen = CAST(unsigned char, (p - origp)); if (m->type == FILE_PSTRING) m->vallen += (unsigned char)file_pstring_length_size(m); return s; } /* Single hex char to int; -1 if not a hex char. */ private int hextoint(int c) { if (!isascii((unsigned char) c)) return -1; if (isdigit((unsigned char) c)) return c - '0'; if ((c >= 'a') && (c <= 'f')) return c + 10 - 'a'; if (( c>= 'A') && (c <= 'F')) return c + 10 - 'A'; return -1; } /* * Print a string containing C character escapes. */ protected void file_showstr(FILE *fp, const char *s, size_t len) { char c; for (;;) { if (len == ~0U) { c = *s++; if (c == '\0') break; } else { if (len-- == 0) break; c = *s++; } if (c >= 040 && c <= 0176) /* TODO isprint && !iscntrl */ (void) fputc(c, fp); else { (void) fputc('\\', fp); switch (c) { case '\a': (void) fputc('a', fp); break; case '\b': (void) fputc('b', fp); break; case '\f': (void) fputc('f', fp); break; case '\n': (void) fputc('n', fp); break; case '\r': (void) fputc('r', fp); break; case '\t': (void) fputc('t', fp); break; case '\v': (void) fputc('v', fp); break; default: (void) fprintf(fp, "%.3o", c & 0377); break; } } } } /* * eatsize(): Eat the size spec from a number [eg. 
10UL] */ private void eatsize(const char **p) { const char *l = *p; if (LOWCASE(*l) == 'u') l++; switch (LOWCASE(*l)) { case 'l': /* long */ case 's': /* short */ case 'h': /* short */ case 'b': /* char/byte */ case 'c': /* char/byte */ l++; /*FALLTHROUGH*/ default: break; } *p = l; } /* * handle a compiled file. */ private struct magic_map * apprentice_map(struct magic_set *ms, const char *fn) { int fd; struct stat st; uint32_t *ptr; uint32_t version, entries, nentries; int needsbyteswap; char *dbname = NULL; struct magic_map *map; size_t i; fd = -1; if ((map = CAST(struct magic_map *, calloc(1, sizeof(*map)))) == NULL) { file_oomem(ms, sizeof(*map)); goto error; } dbname = mkdbname(ms, fn, 0); if (dbname == NULL) goto error; if ((fd = open(dbname, O_RDONLY|O_BINARY)) == -1) goto error; if (fstat(fd, &st) == -1) { file_error(ms, errno, "cannot stat `%s'", dbname); goto error; } if (st.st_size < 8 || st.st_size > MAXMAGIC_SIZE) { file_error(ms, 0, "file `%s' is too %s", dbname, st.st_size < 8 ? "small" : "large"); goto error; } map->len = (size_t)st.st_size; #ifdef QUICK if ((map->p = mmap(0, (size_t)st.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FILE, fd, (off_t)0)) == MAP_FAILED) { file_error(ms, errno, "cannot map `%s'", dbname); goto error; } #else if ((map->p = CAST(void *, malloc(map->len))) == NULL) { file_oomem(ms, map->len); goto error; } if (read(fd, map->p, map->len) != (ssize_t)map->len) { file_badread(ms); goto error; } map->len = 0; #define RET 1 #endif (void)close(fd); fd = -1; ptr = CAST(uint32_t *, map->p); if (*ptr != MAGICNO) { if (swap4(*ptr) != MAGICNO) { file_error(ms, 0, "bad magic in `%s'", dbname); goto error; } needsbyteswap = 1; } else needsbyteswap = 0; if (needsbyteswap) version = swap4(ptr[1]); else version = ptr[1]; if (version != VERSIONNO) { file_error(ms, 0, "File %s supports only version %d magic " "files. `%s' is version %d", VERSION, VERSIONNO, dbname, version); goto error; } entries = (uint32_t)(st.st_size / sizeof(struct magic)); if ((off_t)(entries * sizeof(struct magic)) != st.st_size) { file_error(ms, 0, "Size of `%s' %" INT64_T_FORMAT "u is not " "a multiple of %" SIZE_T_FORMAT "u", dbname, (unsigned long long)st.st_size, sizeof(struct magic)); goto error; } map->magic[0] = CAST(struct magic *, map->p) + 1; nentries = 0; for (i = 0; i < MAGIC_SETS; i++) { if (needsbyteswap) map->nmagic[i] = swap4(ptr[i + 2]); else map->nmagic[i] = ptr[i + 2]; if (i != MAGIC_SETS - 1) map->magic[i + 1] = map->magic[i] + map->nmagic[i]; nentries += map->nmagic[i]; } if (entries != nentries + 1) { file_error(ms, 0, "Inconsistent entries in `%s' %u != %u", dbname, entries, nentries + 1); goto error; } if (needsbyteswap) for (i = 0; i < MAGIC_SETS; i++) byteswap(map->magic[i], map->nmagic[i]); free(dbname); return map; error: if (fd != -1) (void)close(fd); apprentice_unmap(map); free(dbname); return NULL; } /* * handle an mmaped file. 
*/ private int apprentice_compile(struct magic_set *ms, struct magic_map *map, const char *fn) { static const size_t nm = sizeof(*map->nmagic) * MAGIC_SETS; static const size_t m = sizeof(**map->magic); int fd = -1; size_t len; char *dbname; int rv = -1; uint32_t i; union { struct magic m; uint32_t h[2 + MAGIC_SETS]; } hdr; dbname = mkdbname(ms, fn, 1); if (dbname == NULL) goto out; if ((fd = open(dbname, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0644)) == -1) { file_error(ms, errno, "cannot open `%s'", dbname); goto out; } memset(&hdr, 0, sizeof(hdr)); hdr.h[0] = MAGICNO; hdr.h[1] = VERSIONNO; memcpy(hdr.h + 2, map->nmagic, nm); if (write(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) { file_error(ms, errno, "error writing `%s'", dbname); goto out; } for (i = 0; i < MAGIC_SETS; i++) { len = m * map->nmagic[i]; if (write(fd, map->magic[i], len) != (ssize_t)len) { file_error(ms, errno, "error writing `%s'", dbname); goto out; } } if (fd != -1) (void)close(fd); rv = 0; out: free(dbname); return rv; } private const char ext[] = ".mgc"; /* * make a dbname */ private char * mkdbname(struct magic_set *ms, const char *fn, int strip) { const char *p, *q; char *buf; if (strip) { if ((p = strrchr(fn, '/')) != NULL) fn = ++p; } for (q = fn; *q; q++) continue; /* Look for .mgc */ for (p = ext + sizeof(ext) - 1; p >= ext && q >= fn; p--, q--) if (*p != *q) break; /* Did not find .mgc, restore q */ if (p >= ext) while (*q) q++; q++; /* Compatibility with old code that looked in .mime */ if (ms->flags & MAGIC_MIME) { if (asprintf(&buf, "%.*s.mime%s", (int)(q - fn), fn, ext) < 0) return NULL; if (access(buf, R_OK) != -1) { ms->flags &= MAGIC_MIME_TYPE; return buf; } free(buf); } if (asprintf(&buf, "%.*s%s", (int)(q - fn), fn, ext) < 0) return NULL; /* Compatibility with old code that looked in .mime */ if (strstr(p, ".mime") != NULL) ms->flags &= MAGIC_MIME_TYPE; return buf; } /* * Byteswap an mmap'ed file if needed */ private void byteswap(struct magic *magic, uint32_t nmagic) { uint32_t i; for (i = 0; i < nmagic; i++) bs1(&magic[i]); } /* * swap a short */ private uint16_t swap2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ private uint32_t swap4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ private uint64_t swap8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; #if 0 d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; d[4] = s[7]; d[5] = s[6]; d[6] = s[5]; d[7] = s[4]; #else d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; #endif return rv; } /* * byteswap a single magic entry */ private void bs1(struct magic *m) { m->cont_level = swap2(m->cont_level); m->offset = swap4((uint32_t)m->offset); m->in_offset = swap4((uint32_t)m->in_offset); m->lineno = swap4((uint32_t)m->lineno); if (IS_STRING(m->type)) { m->str_range = swap4(m->str_range); m->str_flags = swap4(m->str_flags); } else { m->value.q = swap8(m->value.q); m->num_mask = swap8(m->num_mask); } } protected size_t file_pstring_length_size(const struct magic *m) { switch (m->str_flags & PSTRING_LEN) { case PSTRING_1_LE: return 1; case PSTRING_2_LE: case PSTRING_2_BE: return 2; case PSTRING_4_LE: case PSTRING_4_BE: return 4; default: abort(); /* Impossible */ return 1; } } protected size_t 
file_pstring_get_length(const struct magic *m, const char *s)
{
    size_t len = 0;

    switch (m->str_flags & PSTRING_LEN) {
    case PSTRING_1_LE:
        len = *s;
        break;
    case PSTRING_2_LE:
        len = (s[1] << 8) | s[0];
        break;
    case PSTRING_2_BE:
        len = (s[0] << 8) | s[1];
        break;
    case PSTRING_4_LE:
        len = (s[3] << 24) | (s[2] << 16) | (s[1] << 8) | s[0];
        break;
    case PSTRING_4_BE:
        len = (s[0] << 24) | (s[1] << 16) | (s[2] << 8) | s[3];
        break;
    default:
        abort();    /* Impossible */
    }

    if (m->str_flags & PSTRING_LENGTH_INCLUDES_ITSELF)
        len -= file_pstring_length_size(m);

    return len;
}

protected int
file_magicfind(struct magic_set *ms, const char *name, struct mlist *v)
{
    uint32_t i, j;
    struct mlist *mlist, *ml;

    mlist = ms->mlist[1];

    for (ml = mlist->next; ml != mlist; ml = ml->next) {
        struct magic *ma = ml->magic;
        uint32_t nma = ml->nmagic;

        for (i = 0; i < nma; i++) {
            if (ma[i].type != FILE_NAME)
                continue;
            if (strcmp(ma[i].value.s, name) == 0) {
                v->magic = &ma[i];
                for (j = i + 1; j < nma; j++)
                    if (ma[j].cont_level == 0)
                        break;
                v->nmagic = j - i;
                return 0;
            }
        }
    }
    return -1;
}
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2144_8
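For context on what drives the apprentice code above: apprentice_load() parses magic source files into arrays of struct magic, apprentice_compile() writes the compiled .mgc form, and apprentice_map() maps a compiled database back in. A minimal, hedged sketch of how the resulting database is normally consumed through the public magic(3) API is shown below; it is an illustration only (the fallback sample path is arbitrary) and is not part of the source file above. Build it with -lmagic.

#include <magic.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    const char *desc;
    magic_t m = magic_open(MAGIC_NONE);    /* or MAGIC_MIME_TYPE */

    if (m == NULL)
        return 1;
    /*
     * NULL loads the default database.  A plain magic source file is
     * parsed by the apprentice code above, while a pre-compiled .mgc
     * is handled by apprentice_map().
     */
    if (magic_load(m, NULL) == -1) {
        fprintf(stderr, "magic_load: %s\n", magic_error(m));
        magic_close(m);
        return 1;
    }
    desc = magic_file(m, argc > 1 ? argv[1] : "/etc/passwd");
    printf("%s\n", desc != NULL ? desc : magic_error(m));
    magic_close(m);
    return 0;
}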
crossvul-cpp_data_good_4966_4
/* * NET3: Garbage Collector For AF_UNIX sockets * * Garbage Collector: * Copyright (C) Barak A. Pearlmutter. * Released under the GPL version 2 or later. * * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. * If it doesn't work blame me, it worked when Barak sent it. * * Assumptions: * * - object w/ a bit * - free list * * Current optimizations: * * - explicit stack instead of recursion * - tail recurse on first born instead of immediate push/pop * - we gather the stuff that should not be killed into tree * and stack is just a path from root to the current pointer. * * Future optimizations: * * - don't just push entire root set; process in place * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. * Cope with changing max_files. * Al Viro 11 Oct 1998 * Graph may have cycles. That is, we can send the descriptor * of foo to bar and vice versa. Current code chokes on that. * Fix: move SCM_RIGHTS ones into the separate list and then * skb_free() them all instead of doing explicit fput's. * Another problem: since fput() may block somebody may * create a new unix_socket when we are in the middle of sweep * phase. Fix: revert the logic wrt MARKED. Mark everything * upon the beginning and unmark non-junk ones. * * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS * sent to connect()'ed but still not accept()'ed sockets. * Fixed. Old code had slightly different problem here: * extra fput() in situation when we passed the descriptor via * such socket and closed it (descriptor). That would happen on * each unix_gc() until the accept(). Since the struct file in * question would go to the free list and might be reused... * That might be the reason of random oopses on filp_close() * in unrelated processes. * * AV 28 Feb 1999 * Kill the explicit allocation of stack. Now we keep the tree * with root in dummy + pointer (gc_current) to one of the nodes. * Stack is represented as path from gc_current to dummy. Unmark * now means "add to tree". Push == "make it a son of gc_current". * Pop == "move gc_current to parent". We keep only pointers to * parents (->gc_tree). * AV 1 Mar 1999 * Damn. Added missing check for ->dead in listen queues scanning. * * Miklos Szeredi 25 Jun 2007 * Reimplement with a cycle collecting algorithm. This should * solve several problems with the previous code, like being racy * wrt receive and holding up unrelated socket operations. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/sock.h> #include <net/af_unix.h> #include <net/scm.h> #include <net/tcp_states.h> /* Internal data structures and random procedures: */ static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = file_inode(filp); /* Socket ? 
*/ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { struct socket *sock = SOCKET_I(inode); struct sock *s = sock->sk; /* PF_UNIX ? */ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; } return u_sock; } /* Keep the number of times in flight count for the file * descriptor if it is for an AF_UNIX socket. */ void unix_inflight(struct user_struct *user, struct file *fp) { struct sock *s = unix_get_socket(fp); spin_lock(&unix_gc_lock); if (s) { struct unix_sock *u = unix_sk(s); if (atomic_long_inc_return(&u->inflight) == 1) { BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &gc_inflight_list); } else { BUG_ON(list_empty(&u->link)); } unix_tot_inflight++; } user->unix_inflight++; spin_unlock(&unix_gc_lock); } void unix_notinflight(struct user_struct *user, struct file *fp) { struct sock *s = unix_get_socket(fp); spin_lock(&unix_gc_lock); if (s) { struct unix_sock *u = unix_sk(s); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; } user->unix_inflight--; spin_unlock(&unix_gc_lock); } static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { /* Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; /* Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* Get the socket the fd matches if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); if (sk) { struct unix_sock *u = unix_sk(sk); /* Ignore non-candidates, they could * have been added to the queues after * starting the garbage collection */ if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { hit = true; func(u); } } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); } static void scan_children(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { if (x->sk_state != TCP_LISTEN) { scan_inflight(x, func, hitlist); } else { struct sk_buff *skb; struct sk_buff *next; struct unix_sock *u; LIST_HEAD(embryos); /* For a listening socket collect the queued embryos * and perform a scan on them as well. */ spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { u = unix_sk(skb->sk); /* An embryo cannot be in-flight, so it's safe * to use the list link. */ BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &embryos); } spin_unlock(&x->sk_receive_queue.lock); while (!list_empty(&embryos)) { u = list_entry(embryos.next, struct unix_sock, link); scan_inflight(&u->sk, func, hitlist); list_del_init(&u->link); } } } static void dec_inflight(struct unix_sock *usk) { atomic_long_dec(&usk->inflight); } static void inc_inflight(struct unix_sock *usk) { atomic_long_inc(&usk->inflight); } static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* If this still might be part of a cycle, move it to the end * of the list, so that it's checked even if it was already * passed over */ if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) list_move_tail(&u->link, &gc_candidates); } static bool gc_in_progress; #define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { /* If number of inflight sockets is insane, * force a garbage collect right now. 
*/ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } /* The external entry point: unix_gc() */ void unix_gc(void) { struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; LIST_HEAD(not_cycle_list); spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. Since there are no possible receivers, all * buffers currently on the candidates' queues stay there * during the garbage collection. * * We also know that no new candidate can be added onto the * receive queues. Other, non candidate sockets _can_ be * added to queue, so we must make sure only to touch * candidates. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); } } /* Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &not_cycle_list); __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ while (!list_empty(&not_cycle_list)) { u = list_entry(not_cycle_list.next, struct unix_sock, link); __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); list_move_tail(&u->link, &gc_inflight_list); } /* Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; wake_up(&unix_gc_wait); out: spin_unlock(&unix_gc_lock); }
./CrossVul/dataset_final_sorted/CWE-399/c/good_4966_4
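The collector above exists because AF_UNIX sockets can carry file descriptors in SCM_RIGHTS messages: a descriptor referenced only by an unread in-flight message can form a reference cycle (the "send the descriptor of foo to bar and vice versa" case from the header comment), which unix_inflight()/unix_notinflight() account for and unix_gc() eventually reaps. As a hedged illustration of the userspace side that creates such in-flight references, here is a minimal sendmsg(2)/SCM_RIGHTS sketch; it is standard cmsg usage, not code from the kernel file above, and send_fd is just a local helper name.

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Pass one file descriptor over an AF_UNIX socket.  While the message
 * sits unread in the receiver's queue, the kernel counts the passed fd
 * as in flight (see unix_inflight() above).
 */
static int
send_fd(int sock, int fd_to_pass)
{
    char dummy = '*';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } u;
    struct msghdr msg;
    struct cmsghdr *cmsg;

    memset(&u, 0, sizeof(u));
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}

int
main(void)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1)
        return 1;
    /*
     * Queue each descriptor inside a message to its peer and then close
     * both fds: the sockets are now kept alive only by in-flight
     * references, the situation unix_gc() detects and cleans up.
     */
    if (send_fd(sv[0], sv[1]) == -1 || send_fd(sv[1], sv[0]) == -1)
        perror("sendmsg");
    close(sv[0]);
    close(sv[1]);
    return 0;
}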
crossvul-cpp_data_bad_3486_13
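The powerpc code that follows is the architecture backend for perf_events: power_pmu_add(), power_pmu_del(), power_pmu_enable() and friends are the pmu callbacks the generic perf core invokes when a counter is scheduled onto the hardware. As a rough, hedged sketch of the userspace path that ultimately exercises these callbacks, a minimal perf_event_open(2) user might look like the following; it shows only the standard syscall interface, is not part of the kernel file, and the perf_event_open() wrapper is a local helper.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static long
perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
    int group_fd, unsigned long flags)
{
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int
main(void)
{
    struct perf_event_attr attr;
    long long count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_CPU_CYCLES;
    attr.disabled = 1;
    attr.exclude_kernel = 1;    /* becomes an MMCR0 freeze bit in the code below */

    fd = perf_event_open(&attr, 0, -1, -1, 0);    /* this task, any cpu */
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* ends up in the pmu callbacks */
    /* ... run the workload to be measured ... */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("cycles: %lld\n", count);
    close(fd);
    return 0;
}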
/* * Performance event support - powerpc architecture code * * Copyright 2008-2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <asm/reg.h> #include <asm/pmc.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> struct cpu_hw_events { int n_events; int n_percpu; int disabled; int n_added; int n_limited; u8 pmcs_enabled; struct perf_event *event[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int flags[MAX_HWEVENTS]; unsigned long mmcr[3]; struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; unsigned int group_flag; int n_txn_start; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); struct power_pmu *ppmu; /* * Normally, to ignore kernel events we set the FCS (freeze counters * in supervisor mode) bit in MMCR0, but if the kernel runs with the * hypervisor bit set in the MSR, or if we are running on a processor * where the hypervisor bit is forced to 1 (as on Apple G5 processors), * then we need to use the FCHV bit to ignore kernel events. */ static unsigned int freeze_events_kernel = MMCR0_FCS; /* * 32-bit doesn't have MMCRA but does have an MMCR2, * and a few other names are different. */ #ifdef CONFIG_PPC32 #define MMCR0_FCHV 0 #define MMCR0_PMCjCE MMCR0_PMCnCE #define SPRN_MMCRA SPRN_MMCR2 #define MMCRA_SAMPLE_ENABLE 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { return 0; } static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { } static inline u32 perf_get_misc_flags(struct pt_regs *regs) { return 0; } static inline void perf_read_regs(struct pt_regs *regs) { } static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; } #endif /* CONFIG_PPC32 */ /* * Things that are specific to 64-bit implementations. */ #ifdef CONFIG_PPC64 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; if (slot > 1) return 4 * (slot - 1); } return 0; } /* * The user wants a data address recorded. * If we're not doing instruction sampling, give them the SDAR * (sampled data address). If we are doing instruction sampling, then * only give them the SDAR if it corresponds to the instruction * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC * bit in MMCRA. */ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { unsigned long mmcra = regs->dsisr; unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? 
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) *addrp = mfspr(SPRN_SDAR); } static inline u32 perf_get_misc_flags(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; unsigned long sihv = MMCRA_SIHV; unsigned long sipr = MMCRA_SIPR; if (TRAP(regs) != 0xf00) return 0; /* not a PMU interrupt */ if (ppmu->flags & PPMU_ALT_SIPR) { sihv = POWER6_MMCRA_SIHV; sipr = POWER6_MMCRA_SIPR; } /* PR has priority over HV, so order below is important */ if (mmcra & sipr) return PERF_RECORD_MISC_USER; if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) return PERF_RECORD_MISC_HYPERVISOR; return PERF_RECORD_MISC_KERNEL; } /* * Overload regs->dsisr to store MMCRA so we only need to read it once * on each interrupt. */ static inline void perf_read_regs(struct pt_regs *regs) { regs->dsisr = mfspr(SPRN_MMCRA); } /* * If interrupts were soft-disabled when a PMU interrupt occurs, treat * it as an NMI. */ static inline int perf_intr_is_nmi(struct pt_regs *regs) { return !regs->softe; } #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); void perf_event_print_debug(void) { } /* * Read one performance monitor counter (PMC). */ static unsigned long read_pmc(int idx) { unsigned long val; switch (idx) { case 1: val = mfspr(SPRN_PMC1); break; case 2: val = mfspr(SPRN_PMC2); break; case 3: val = mfspr(SPRN_PMC3); break; case 4: val = mfspr(SPRN_PMC4); break; case 5: val = mfspr(SPRN_PMC5); break; case 6: val = mfspr(SPRN_PMC6); break; #ifdef CONFIG_PPC64 case 7: val = mfspr(SPRN_PMC7); break; case 8: val = mfspr(SPRN_PMC8); break; #endif /* CONFIG_PPC64 */ default: printk(KERN_ERR "oops trying to read PMC%d\n", idx); val = 0; } return val; } /* * Write one PMC. */ static void write_pmc(int idx, unsigned long val) { switch (idx) { case 1: mtspr(SPRN_PMC1, val); break; case 2: mtspr(SPRN_PMC2, val); break; case 3: mtspr(SPRN_PMC3, val); break; case 4: mtspr(SPRN_PMC4, val); break; case 5: mtspr(SPRN_PMC5, val); break; case 6: mtspr(SPRN_PMC6, val); break; #ifdef CONFIG_PPC64 case 7: mtspr(SPRN_PMC7, val); break; case 8: mtspr(SPRN_PMC8, val); break; #endif /* CONFIG_PPC64 */ default: printk(KERN_ERR "oops trying to write PMC%d\n", idx); } } /* * Check if a set of events can all go on the PMU at once. * If they can't, this will look at alternative codes for the events * and see if any combination of alternative codes is feasible. * The feasible set is returned in event_id[]. 
*/ static int power_check_constraints(struct cpu_hw_events *cpuhw, u64 event_id[], unsigned int cflags[], int n_ev) { unsigned long mask, value, nv; unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; int i, j; unsigned long addf = ppmu->add_fields; unsigned long tadd = ppmu->test_adder; if (n_ev > ppmu->n_counter) return -1; /* First see if the events will go on as-is */ for (i = 0; i < n_ev; ++i) { if ((cflags[i] & PPMU_LIMITED_PMC_REQD) && !ppmu->limited_pmc_event(event_id[i])) { ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); event_id[i] = cpuhw->alternatives[i][0]; } if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], &cpuhw->avalues[i][0])) return -1; } value = mask = 0; for (i = 0; i < n_ev; ++i) { nv = (value | cpuhw->avalues[i][0]) + (value & cpuhw->avalues[i][0] & addf); if ((((nv + tadd) ^ value) & mask) != 0 || (((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0]) != 0) break; value = nv; mask |= cpuhw->amasks[i][0]; } if (i == n_ev) return 0; /* all OK */ /* doesn't work, gather alternatives... */ if (!ppmu->get_alternatives) return -1; for (i = 0; i < n_ev; ++i) { choice[i] = 0; n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], cpuhw->alternatives[i]); for (j = 1; j < n_alt[i]; ++j) ppmu->get_constraint(cpuhw->alternatives[i][j], &cpuhw->amasks[i][j], &cpuhw->avalues[i][j]); } /* enumerate all possibilities and see if any will work */ i = 0; j = -1; value = mask = nv = 0; while (i < n_ev) { if (j >= 0) { /* we're backtracking, restore context */ value = svalues[i]; mask = smasks[i]; j = choice[i]; } /* * See if any alternative k for event_id i, * where k > j, will satisfy the constraints. */ while (++j < n_alt[i]) { nv = (value | cpuhw->avalues[i][j]) + (value & cpuhw->avalues[i][j] & addf); if ((((nv + tadd) ^ value) & mask) == 0 && (((nv + tadd) ^ cpuhw->avalues[i][j]) & cpuhw->amasks[i][j]) == 0) break; } if (j >= n_alt[i]) { /* * No feasible alternative, backtrack * to event_id i-1 and continue enumerating its * alternatives from where we got up to. */ if (--i < 0) return -1; } else { /* * Found a feasible alternative for event_id i, * remember where we got up to with this event_id, * go on to the next event_id, and start with * the first alternative for it. */ choice[i] = j; svalues[i] = value; smasks[i] = mask; value = nv; mask |= cpuhw->amasks[i][j]; ++i; j = -1; } } /* OK, we have a feasible combination, tell the caller the solution */ for (i = 0; i < n_ev; ++i) event_id[i] = cpuhw->alternatives[i][choice[i]]; return 0; } /* * Check if newly-added events have consistent settings for * exclude_{user,kernel,hv} with each other and any previously * added events. 
*/ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; int i, n, first; struct perf_event *event; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; ++i) { if (cflags[i] & PPMU_LIMITED_PMC_OK) { cflags[i] &= ~PPMU_LIMITED_PMC_REQD; continue; } event = ctrs[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } if (eu || ek || eh) for (i = 0; i < n; ++i) if (cflags[i] & PPMU_LIMITED_PMC_OK) cflags[i] |= PPMU_LIMITED_PMC_REQD; return 0; } static u64 check_and_compute_delta(u64 prev, u64 val) { u64 delta = (val - prev) & 0xfffffffful; /* * POWER7 can roll back counter values, if the new value is smaller * than the previous value it will cause the delta and the counter to * have bogus values unless we rolled a counter over. If a coutner is * rolled back, it will be smaller, but within 256, which is the maximum * number of events to rollback at once. If we dectect a rollback * return 0. This can lead to a small lack of precision in the * counters. */ if (prev > val && (prev - val) < 256) delta = 0; return delta; } static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; if (event->hw.state & PERF_HES_STOPPED) return; if (!event->hw.idx) return; /* * Performance monitor interrupts come even when interrupts * are soft-disabled, as long as interrupts are hard-enabled. * Therefore we treat them like NMIs. */ do { prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); delta = check_and_compute_delta(prev, val); if (!delta) return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } /* * On some machines, PMC5 and PMC6 can't be written, don't respect * the freeze conditions, and don't generate interrupts. This tells * us if `event' is using such a PMC. */ static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); } static void freeze_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev, delta; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; if (!event->hw.idx) continue; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; delta = check_and_compute_delta(prev, val); if (delta) local64_add(delta, &event->count); } } static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); if (check_and_compute_delta(prev, val)) local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } /* * Since limited events don't respect the freeze conditions, we * have to read them immediately after freezing or unfreezing the * other events. We try to keep the values from the limited * events as consistent as possible by keeping the delay (in * cycles and instructions) between freezing/unfreezing and reading * the limited events as small and consistent as possible. 
* Therefore, if any limited events are in use, we read them * both, and always in the same order, to minimize variability, * and do it inside the same asm that writes MMCR0. */ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) { unsigned long pmc5, pmc6; if (!cpuhw->n_limited) { mtspr(SPRN_MMCR0, mmcr0); return; } /* * Write MMCR0, then read PMC5 and PMC6 immediately. * To ensure we don't get a performance monitor interrupt * between writing MMCR0 and freezing/thawing the limited * events, we first write MMCR0 with the event overflow * interrupt enable bits turned off. */ asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" : "=&r" (pmc5), "=&r" (pmc6) : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), "i" (SPRN_MMCR0), "i" (SPRN_PMC5), "i" (SPRN_PMC6)); if (mmcr0 & MMCR0_FC) freeze_limited_counters(cpuhw, pmc5, pmc6); else thaw_limited_counters(cpuhw, pmc5, pmc6); /* * Write the full MMCR0 including the event overflow interrupt * enable bits, if necessary. */ if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) mtspr(SPRN_MMCR0, mmcr0); } /* * Disable all events to prevent PMU interrupts and to allow * events to be added or removed. */ static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; cpuhw->n_added = 0; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } /* * Disable instruction sampling if it was enabled */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); } /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been * executed and the PMU has frozen the events * before we return. */ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); mb(); } local_irq_restore(flags); } /* * Re-enable all events if disable == 0. * If we were previously disabled and events were added, then * put the new config on the PMU. */ static void power_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; unsigned long flags; long i; unsigned long val; s64 left; unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { local_irq_restore(flags); return; } cpuhw->disabled = 0; /* * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings * (possibly updated for removal of events). */ if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); if (cpuhw->n_events == 0) ppc_set_pmu_inuse(0); goto out_enable; } /* * Compute MMCR* values for the new set of events */ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, cpuhw->mmcr)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; } /* * Add in MMCR0 freeze bits corresponding to the * attr.exclude_* bits for the first event. * We have already checked that all events have the * same values for these bits as the first event. 
*/ event = cpuhw->event[0]; if (event->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; if (event->attr.exclude_kernel) cpuhw->mmcr[0] |= freeze_events_kernel; if (event->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* * Write the new configuration to MMCR* with the freeze * bit set and set the hardware events to their initial values. * Then unfreeze the events. */ ppc_set_pmu_inuse(1); mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); /* * Read off any pre-existing events that need to move * to another PMC. */ for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_read(event); write_pmc(event->hw.idx, 0); event->hw.idx = 0; } } /* * Initialize the PMCs for all the new and moved events. */ cpuhw->n_limited = n_lim = 0; for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx) continue; idx = hwc_index[i] + 1; if (is_limited_pmc(idx)) { cpuhw->limited_counter[n_lim] = event; cpuhw->limited_hwidx[n_lim] = idx; ++n_lim; continue; } val = 0; if (event->hw.sample_period) { left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); event->hw.idx = idx; if (event->hw.state & PERF_HES_STOPPED) val = 0; write_pmc(idx, val); perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: mb(); write_mmcr0(cpuhw, cpuhw->mmcr[0]); /* * Enable instruction sampling if necessary */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mb(); mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); } out: local_irq_restore(flags); } static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[], u64 *events, unsigned int *flags) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; flags[n] = group->hw.event_base; events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; flags[n] = event->hw.event_base; events[n++] = event->hw.config; } } return n; } /* * Add a event to the PMU. * If all events are not already frozen, then we disable and * re-enable the PMU in order to get hw_perf_enable to do the * actual work of reconfiguring the PMU. */ static int power_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; unsigned long flags; int n0; int ret = -EAGAIN; local_irq_save(flags); perf_pmu_disable(event->pmu); /* * Add the event to the list (if there is room) * and check whether the total set is still feasible. 
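 * The feasibility check is the same pair of tests used when the event
 * was initialized: check_excludes() verifies that the events agree on
 * their exclude_user/kernel/hv settings (events that can live on a
 * limited PMC are exempt), and power_check_constraints() verifies that
 * the PMU can actually schedule the whole set on its counters.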
*/ cpuhw = &__get_cpu_var(cpu_hw_events); n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; cpuhw->event[n0] = event; cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; if (!(ef_flags & PERF_EF_START)) event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) goto nocheck; if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) goto out; event->hw.config = cpuhw->events[n0]; nocheck: ++cpuhw->n_events; ++cpuhw->n_added; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } /* * Remove a event from the PMU. */ static void power_pmu_del(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; long i; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); cpuhw = &__get_cpu_var(cpu_hw_events); for (i = 0; i < cpuhw->n_events; ++i) { if (event == cpuhw->event[i]) { while (++i < cpuhw->n_events) { cpuhw->event[i-1] = cpuhw->event[i]; cpuhw->events[i-1] = cpuhw->events[i]; cpuhw->flags[i-1] = cpuhw->flags[i]; } --cpuhw->n_events; ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); if (event->hw.idx) { write_pmc(event->hw.idx, 0); event->hw.idx = 0; } perf_event_update_userpage(event); break; } } for (i = 0; i < cpuhw->n_limited; ++i) if (event == cpuhw->limited_counter[i]) break; if (i < cpuhw->n_limited) { while (++i < cpuhw->n_limited) { cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; } --cpuhw->n_limited; } if (cpuhw->n_events == 0) { /* disable exceptions if no events are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * POWER-PMU does not support disabling individual counters, hence * program their cycle counter to their max value and ignore the interrupts. 
*/ static void power_pmu_start(struct perf_event *event, int ef_flags) { unsigned long flags; s64 left; if (!event->hw.idx || !event->hw.sample_period) return; if (!(event->hw.state & PERF_HES_STOPPED)) return; if (ef_flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); local_irq_save(flags); perf_pmu_disable(event->pmu); event->hw.state = 0; left = local64_read(&event->hw.period_left); write_pmc(event->hw.idx, left); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } static void power_pmu_stop(struct perf_event *event, int ef_flags) { unsigned long flags; if (!event->hw.idx || !event->hw.sample_period) return; if (event->hw.state & PERF_HES_STOPPED) return; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; write_pmc(event->hw.idx, 0); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); local_irq_restore(flags); } /* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ void power_pmu_start_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); perf_pmu_disable(pmu); cpuhw->group_flag |= PERF_EVENT_TXN; cpuhw->n_txn_start = cpuhw->n_events; } /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ void power_pmu_cancel_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); } /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 if success */ int power_pmu_commit_txn(struct pmu *pmu) { struct cpu_hw_events *cpuhw; long i, n; if (!ppmu) return -EAGAIN; cpuhw = &__get_cpu_var(cpu_hw_events); n = cpuhw->n_events; if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) return -EAGAIN; i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); if (i < 0) return -EAGAIN; for (i = cpuhw->n_txn_start; i < n; ++i) cpuhw->event[i]->hw.config = cpuhw->events[i]; cpuhw->group_flag &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); return 0; } /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. * A event can only go on a limited PMC if it counts something * that a limited PMC can count, doesn't require interrupts, and * doesn't exclude any processor mode. */ static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, unsigned int flags) { int n; u64 alt[MAX_EVENT_ALTERNATIVES]; if (event->attr.exclude_user || event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) return 1; /* * The requested event_id isn't on a limited PMC already; * see if any alternative code goes on a limited PMC. */ if (!ppmu->get_alternatives) return 0; flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; n = ppmu->get_alternatives(ev, flags, alt); return n > 0; } /* * Find an alternative event_id that goes on a normal PMC, if possible, * and return the event_id code, or 0 if there is no such alternative. * (Note: event_id code 0 is "don't count" on all machines.) 
*/ static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { u64 alt[MAX_EVENT_ALTERNATIVES]; int n; flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); n = ppmu->get_alternatives(ev, flags, alt); if (!n) return 0; return alt[0]; } /* Number of perf_events counting hardware events */ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); /* * Release the PMU if this is the last perf_event. */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) release_pmc_hardware(); mutex_unlock(&pmc_reserve_mutex); } } /* * Translate a generic cache event_id config to a raw event_id code. */ static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; } static int power_pmu_event_init(struct perf_event *event) { u64 ev; unsigned long flags; struct perf_event *ctrs[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int cflags[MAX_HWEVENTS]; int n; int err; struct cpu_hw_events *cpuhw; if (!ppmu) return -ENOENT; switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config_base = ev; event->hw.idx = 0; /* * If we are not running on a hypervisor, force the * exclude_hv bit to 0 so that we don't care what * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) event->attr.exclude_hv = 0; /* * If this is a per-task event, then we can use * PM_RUN_* events interchangeably with their non RUN_* * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. * XXX we should check if the task is an idle task. */ flags = 0; if (event->attach_state & PERF_ATTACH_TASK) flags |= PPMU_ONLY_COUNT_RUN; /* * If this machine has limited events, check whether this * event_id could go on a limited event. */ if (ppmu->flags & PPMU_LIMITED_PMC5_6) { if (can_go_on_limited_pmc(event, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { /* * The requested event_id is on a limited PMC, * but we can't use a limited PMC; see if any * alternative goes on a normal PMC. */ ev = normal_pmc_alternative(ev, flags); if (!ev) return -EINVAL; } } /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. 
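 * (collect_events() below gathers the group leader and its existing
 * hardware siblings into ctrs[]; the limit passed is ppmu->n_counter - 1
 * because this event still needs a hardware counter of its own.)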
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) return -EINVAL; } events[n] = ev; ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) return -EINVAL; cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); put_cpu_var(cpu_hw_events); if (err) return -EINVAL; event->hw.config = events[n]; event->hw.event_base = cflags[n]; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } event->destroy = hw_perf_event_destroy; return err; } struct pmu power_pmu = { .pmu_enable = power_pmu_enable, .pmu_disable = power_pmu_disable, .event_init = power_pmu_event_init, .add = power_pmu_add, .del = power_pmu_del, .start = power_pmu_start, .stop = power_pmu_stop, .read = power_pmu_read, .start_txn = power_pmu_start_txn, .cancel_txn = power_pmu_cancel_txn, .commit_txn = power_pmu_commit_txn, }; /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs, int nmi) { u64 period = event->hw.sample_period; s64 prev, delta, left; int record = 0; if (event->hw.state & PERF_HES_STOPPED) { write_pmc(event->hw.idx, 0); return; } /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* * See if the total period for this event has expired, * and update for the next period. */ val = 0; left = local64_read(&event->hw.period_left) - delta; if (period) { if (left <= 0) { left += period; if (left <= 0) left = period; record = 1; event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) val = 0x80000000LL - left; } write_pmc(event->hw.idx, val); local64_set(&event->hw.prev_count, val); local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); /* * Finally record data if requested. */ if (record) { struct perf_sample_data data; perf_sample_data_init(&data, ~0ULL); data.period = event->hw.last_period; if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); if (perf_event_overflow(event, nmi, &data, regs)) power_pmu_stop(event, 0); } } /* * Called from generic code to get the misc flags (i.e. processor mode) * for an event_id. */ unsigned long perf_misc_flags(struct pt_regs *regs) { u32 flags = perf_get_misc_flags(regs); if (flags) return flags; return user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; } /* * Called from generic code to get the instruction pointer * for an event_id. 
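 * If the interrupt was a performance monitor exception (trap 0xf00),
 * the sampled address comes from the SIAR register plus a cpu-specific
 * adjustment (perf_ip_adjust()); otherwise the interrupted nip is used.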
*/ unsigned long perf_instruction_pointer(struct pt_regs *regs) { unsigned long ip; if (TRAP(regs) != 0xf00) return regs->nip; /* not a PMU interrupt */ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); return ip; } static bool pmc_overflow(unsigned long val) { if ((int)val < 0) return true; /* * Events on POWER7 can roll back if a speculative event doesn't * eventually complete. Unfortunately in some rare cases they will * raise a performance monitor exception. We need to catch this to * ensure we reset the PMC. In all cases the PMC will be 256 or less * cycles from overflow. * * We only do this if the first pass fails to find any overflowing * PMCs because a user might set a period of less than 256 and we * don't want to mistakenly reset them. */ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) return true; return false; } /* * Performance monitor interrupt stuff */ static void perf_event_interrupt(struct pt_regs *regs) { int i; struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); struct perf_event *event; unsigned long val; int found = 0; int nmi; if (cpuhw->n_limited) freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), mfspr(SPRN_PMC6)); perf_read_regs(regs); nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); else irq_enter(); for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; val = read_pmc(event->hw.idx); if ((int)val < 0) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs, nmi); } } /* * In case we didn't find and reset the event that caused * the interrupt, scan all events and reset any that are * negative, to avoid getting continual interrupts. * Any that we processed in the previous loop will not be negative. */ if (!found) { for (i = 0; i < ppmu->n_counter; ++i) { if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); if (pmc_overflow(val)) write_pmc(i + 1, 0); } } /* * Reset MMCR0 to its normal value. This will set PMXE and * clear FC (freeze counters) and PMAO (perf mon alert occurred) * and thus allow interrupts to occur again. * XXX might want to use MSR.PM to keep the events frozen until * we get back out of this interrupt. */ write_mmcr0(cpuhw, cpuhw->mmcr[0]); if (nmi) nmi_exit(); else irq_exit(); } static void power_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); if (!ppmu) return; memset(cpuhw, 0, sizeof(*cpuhw)); cpuhw->mmcr[0] = MMCR0_FC; } static int __cpuinit power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: power_pmu_setup(cpu); break; default: break; } return NOTIFY_OK; } int register_power_pmu(struct power_pmu *pmu) { if (ppmu) return -EBUSY; /* something's already registered */ ppmu = pmu; pr_info("%s performance monitor hardware support registered\n", pmu->name); #ifdef MSR_HV /* * Use FCHV to ignore kernel events if MSR.HV is set. */ if (mfmsr() & MSR_HV) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); perf_cpu_notifier(power_pmu_notifier); return 0; }
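/*
 * For reference -- a rough illustrative sketch, not part of the driver:
 * hw_perf_cache_event() above expects the generic PERF_TYPE_HW_CACHE
 * config to carry the cache type in bits 0-7, the operation in bits 8-15
 * and the result in bits 16-23.  A hypothetical L1-data read-miss event
 * would therefore be requested as:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which hw_perf_cache_event() maps through ppmu->cache_events to the
 * machine-specific raw event code (a table entry of 0 means the event
 * is unsupported, -1 means the combination is invalid).
 */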
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_13
crossvul-cpp_data_good_3763_0
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/wanrouter.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. 
If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. */ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); } /** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0 || len > sizeof(struct sockaddr_storage)) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." * 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
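 * It supplies the "socket:[<inode>]" pseudo-name that shows up, for
 * example, when reading the /proc/<pid>/fd/ symlinks for a socket.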
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", dentry->d_inode->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); } static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. */ static int sock_alloc_file(struct socket *sock, struct file **f, int flags) { struct qstr name = { .name = "" }; struct path path; struct file *file; int fd; fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) return fd; path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); if (unlikely(!path.dentry)) { put_unused_fd(fd); return -ENOMEM; } path.mnt = mntget(sock_mnt); d_instantiate(path.dentry, SOCK_INODE(sock)); SOCK_INODE(sock)->i_fop = &socket_file_ops; file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (unlikely(!file)) { /* drop dentry, keep inode */ ihold(path.dentry->d_inode); path_put(&path); put_unused_fd(fd); return -ENFILE; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; *f = file; return fd; } int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = sock_alloc_file(sock, &newfile, flags); if (likely(fd >= 0)) fd_install(fd, newfile); return fd; } EXPORT_SYMBOL(sock_map_fd); struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; } EXPORT_SYMBOL(sock_from_file); /** * sockfd_lookup - Go from a file number to its socket slot * @fd: file handle * @err: pointer to an error code return * * The file handle passed in is locked and the socket it is bound * too is returned. If an error occurs the err pointer is overwritten * with a negative errno code and NULL is returned. The function checks * for both invalid handles and passing a handle which is not a socket. * * On a success the socket object pointer is returned. 
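 * Note that the file reference taken here is only dropped on failure;
 * on success the caller keeps it and must drop it again (typically via
 * sockfd_put()) once it is done with the socket.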
*/ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); this_cpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. */ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, .llseek = noop_llseek, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. */ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags)) return; this_cpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } EXPORT_SYMBOL(sock_release); int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; if (sock_flag(sk, SOCK_WIFI_STATUS)) *tx_flags |= SKBTX_WIFI_STATUS; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; return sock->ops->sendmsg(iocb, sock, msg, size); } static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_sendmsg); static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int 
ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_sendmsg); static int ktime2ts(ktime_t kt, struct timespec *ts) { if (kt.tv64) { *ts = ktime_to_timespec(kt); return 1; } else { return 0; } } /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (skb->tstamp.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) { skb_get_timestampns(skb, ts + 0); empty = 0; } if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime2ts(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime2ts(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int ack; if (!sock_flag(sk, SOCK_WIFI_STATUS)) return; if (!skb->wifi_acked_valid) return; ack = skb->wifi_acked; put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); } EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(__u32), &skb->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { sock_recv_timestamp(msg, sk, skb); sock_recv_drops(msg, sk, skb); } EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; si->flags = flags; return sock->ops->recvmsg(iocb, sock, msg, size, flags); } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); } int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; 
init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_recvmsg); static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } /** * kernel_recvmsg - Receive a message from a socket (kernel space) * @sock: The socket to receive the message from * @msg: Received message * @vec: Input s/g array for message data * @num: Size of input s/g array * @size: Number of bytes to read * @flags: Message flags (MSG_DONTWAIT, etc...) * * On return the msg structure contains the scatter/gather array passed in the * vec argument. The array is modified so that it consists of the unfilled * portion of the original array. * * The returned value is the total number of bytes received, or an error. */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_recvmsg); static void sock_aio_dtor(struct kiocb *iocb) { kfree(iocb->private); } static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; sock_update_classid(sock->sk); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) { siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); if (!siocb) return NULL; iocb->ki_dtor = sock_aio_dtor; } siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; if (iocb->ki_left == 0) /* Match SYS5 behaviour */ return 0; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; return __sock_sendmsg(iocb, sock, msg, size); } static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } /* * Atomic setting of ioctl hooks to avoid race * with module unload. */ static DEFINE_MUTEX(br_ioctl_mutex); static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { mutex_lock(&br_ioctl_mutex); br_ioctl_hook = hook; mutex_unlock(&br_ioctl_mutex); } EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); static int (*vlan_ioctl_hook) (struct net *, void __user *arg); void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); } EXPORT_SYMBOL(vlan_ioctl_set); static DEFINE_MUTEX(dlci_ioctl_mutex); static int (*dlci_ioctl_hook) (unsigned int, void __user *); void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) { mutex_lock(&dlci_ioctl_mutex); dlci_ioctl_hook = hook; mutex_unlock(&dlci_ioctl_mutex); } EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, unsigned int cmd, unsigned long arg) { int err; void __user *argp = (void __user *)arg; err = sock->ops->ioctl(sock, cmd, arg); /* * If this ioctl is unknown try to hand it down * to the NIC driver. */ if (err == -ENOIOCTLCMD) err = dev_ioctl(net, cmd, argp); return err; } /* * With an ioctl, arg may well be a user mode pointer, but we don't know * what to do with it - that's up to the protocol still. 
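 * sock_ioctl() therefore only implements the generic cases itself
 * (FIOSETOWN/SIOCSPGRP and friends), sends the device-private and
 * wireless-extension ranges straight to dev_ioctl(), hands the
 * bridge/VLAN/DLCI requests to hooks registered by those modules, and
 * passes everything else to the protocol's ->ioctl() via
 * sock_do_ioctl(), which in turn falls back to dev_ioctl() when the
 * protocol returns -ENOIOCTLCMD.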
*/ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; struct net *net; sock = file->private_data; sk = sock->sk; net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WEXT_CORE if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { err = dev_ioctl(net, cmd, argp); } else #endif switch (cmd) { case FIOSETOWN: case SIOCSPGRP: err = -EFAULT; if (get_user(pid, (int __user *)argp)) break; err = f_setown(sock->file, pid, 1); break; case FIOGETOWN: case SIOCGPGRP: err = put_user(f_getown(sock->file), (int __user *)argp); break; case SIOCGIFBR: case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: err = -ENOPKG; if (!br_ioctl_hook) request_module("bridge"); mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case SIOCGIFVLAN: case SIOCSIFVLAN: err = -ENOPKG; if (!vlan_ioctl_hook) request_module("8021q"); mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: case SIOCDELDLCI: err = -ENOPKG; if (!dlci_ioctl_hook) request_module("dlci"); mutex_lock(&dlci_ioctl_mutex); if (dlci_ioctl_hook) err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; } int sock_create_lite(int family, int type, int protocol, struct socket **res) { int err; struct socket *sock = NULL; err = security_socket_create(family, type, protocol, 1); if (err) goto out; sock = sock_alloc(); if (!sock) { err = -ENOMEM; goto out; } sock->type = type; err = security_socket_post_create(sock, family, type, protocol, 1); if (err) goto out_release; out: *res = sock; return err; out_release: sock_release(sock); sock = NULL; goto out; } EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; return sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) { struct socket *sock = file->private_data; return sock->ops->mmap(file, sock, vma); } static int sock_close(struct inode *inode, struct file *filp) { /* * It was possible the inode is NULL we were * closing an unfinished socket. */ if (!inode) { printk(KERN_DEBUG "sock_close: NULL inode\n"); return 0; } sock_release(SOCKET_I(inode)); return 0; } /* * Update the socket async list * * Fasync_list locking strategy. * * 1. fasync_list is modified only under process context socket lock * i.e. under semaphore. * 2. 
fasync_list is used under read_lock(&sk->sk_callback_lock) * or under socket lock */ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; struct socket_wq *wq; if (sk == NULL) return -EINVAL; lock_sock(sk); wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) sock_reset_flag(sk, SOCK_FASYNC); else sock_set_flag(sk, SOCK_FASYNC); release_sock(sk); return 0; } /* This function may be called only under socket lock or callback_lock or rcu_lock */ int sock_wake_async(struct socket *sock, int how, int band) { struct socket_wq *wq; if (!sock) return -1; rcu_read_lock(); wq = rcu_dereference(sock->wq); if (!wq || !wq->fasync_list) { rcu_read_unlock(); return -1; } switch (how) { case SOCK_WAKE_WAITD: if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) break; /* fall through */ case SOCK_WAKE_IO: call_kill: kill_fasync(&wq->fasync_list, SIGIO, band); break; case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, SIGURG, band); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(sock_wake_async); int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { net_warn_ratelimited("socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. */ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. 
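 * (The reference on pf->owner taken above only needs to last across the
 * ->create() call; what has to stay pinned for the lifetime of the
 * socket is sock->ops->owner, so we take that reference below and then
 * drop the family module's one.)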
*/ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; } EXPORT_SYMBOL(__sock_create); int sock_create(int family, int type, int protocol, struct socket **res) { return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } EXPORT_SYMBOL(sock_create); int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); } EXPORT_SYMBOL(sock_create_kern); SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) { int retval; struct socket *sock; int flags; /* Check the SOCK_* constants for consistency. */ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK); flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; retval = sock_create(family, type, protocol, &sock); if (retval < 0) goto out; retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); if (retval < 0) goto out_release; out: /* It may be already another descriptor 8) Not kernel problem. */ return retval; out_release: sock_release(sock); return retval; } /* * Create a pair of connected sockets. */ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec) { struct socket *sock1, *sock2; int fd1, fd2, err; struct file *newfile1, *newfile2; int flags; flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; /* * Obtain the first socket and check if the underlying protocol * supports the socketpair call. */ err = sock_create(family, type, protocol, &sock1); if (err < 0) goto out; err = sock_create(family, type, protocol, &sock2); if (err < 0) goto out_release_1; err = sock1->ops->socketpair(sock1, sock2); if (err < 0) goto out_release_both; fd1 = sock_alloc_file(sock1, &newfile1, flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } fd2 = sock_alloc_file(sock2, &newfile2, flags); if (unlikely(fd2 < 0)) { err = fd2; fput(newfile1); put_unused_fd(fd1); sock_release(sock2); goto out; } audit_fd_pair(fd1, fd2); fd_install(fd1, newfile1); fd_install(fd2, newfile2); /* fd1 and fd2 may be already another descriptors. * Not kernel problem. */ err = put_user(fd1, &usockvec[0]); if (!err) err = put_user(fd2, &usockvec[1]); if (!err) return 0; sys_close(fd2); sys_close(fd1); return err; out_release_both: sock_release(sock2); out_release_1: sock_release(sock1); out: return err; } /* * Bind a name to a socket. Nothing much to do here since it's * the protocol's responsibility to handle the local address. * * We move the socket address to kernel space before we call * the protocol layer (having also checked the address is ok). 
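 * move_addr_to_kernel() rejects lengths larger than a struct
 * sockaddr_storage, copies the address in and runs the audit hook on
 * it; only then do we consult the LSM via security_socket_bind() and
 * finally the protocol's ->bind() handler.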
*/ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, &address); if (err >= 0) { err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); if (!err) err = sock->ops->bind(sock, (struct sockaddr *) &address, addrlen); } fput_light(sock->file, fput_needed); } return err; } /* * Perform a listen. Basically, we allow the protocol to do anything * necessary for a listen, and if that works, we mark the socket as * ready for listening. */ SYSCALL_DEFINE2(listen, int, fd, int, backlog) { struct socket *sock; int err, fput_needed; int somaxconn; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); if (!err) err = sock->ops->listen(sock, backlog); fput_light(sock->file, fput_needed); } return err; } /* * For accept, we attempt to create a new socket, set up the link * with the client, wake up the client, then return the new * connected fd. We collect the address of the connector in kernel * space and move it to user at the very end. This is unclean because * we open the socket then return an error. * * 1003.1g adds the ability to recvmsg() to query connection pending * status to recvmsg. We need to add that support in a way thats * clean when we restucture accept also. */ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags) { struct socket *sock, *newsock; struct file *newfile; int err, len, newfd, fput_needed; struct sockaddr_storage address; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = -ENFILE; newsock = sock_alloc(); if (!newsock) goto out_put; newsock->type = sock->type; newsock->ops = sock->ops; /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. */ __module_get(newsock->ops->owner); newfd = sock_alloc_file(newsock, &newfile, flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } err = security_socket_accept(sock, newsock); if (err) goto out_fd; err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_fd; if (upeer_sockaddr) { if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) { err = -ECONNABORTED; goto out_fd; } err = move_addr_to_user(&address, len, upeer_sockaddr, upeer_addrlen); if (err < 0) goto out_fd; } /* File flags are not inherited via accept() unlike another OSes. */ fd_install(newfd, newfile); err = newfd; out_put: fput_light(sock->file, fput_needed); out: return err; out_fd: fput(newfile); put_unused_fd(newfd); goto out_put; } SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen) { return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); } /* * Attempt to connect to a socket with the server address. The address * is in user space so we verify it is OK and move it to kernel space. 
* * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to * break bindings * * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and * other SEQPACKET protocols that take time to connect() as it doesn't * include the -EINPROGRESS status for such sockets. */ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = move_addr_to_kernel(uservaddr, addrlen, &address); if (err < 0) goto out_put; err = security_socket_connect(sock, (struct sockaddr *)&address, addrlen); if (err) goto out_put; err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the local address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = security_socket_getsockname(sock); if (err) goto out_put; err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); if (err) goto out_put; err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the remote address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getpeername(sock); if (err) { fput_light(sock->file, fput_needed); return err; } err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1); if (!err) err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); fput_light(sock->file, fput_needed); } return err; } /* * Send a datagram to a given address. We move the address into kernel * space and check the user space data area is readable before invoking * the protocol. */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, &address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Send a datagram down a socket. */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } /* * Receive a frame from the socket and optionally record the address of the * sender. 
We verify the buffers are writable and if needed move the * sender address from kernel to user space. */ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; } /* * Receive a datagram from a socket. */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, unsigned int flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, char __user *, optval, int, optlen) { int err, fput_needed; struct socket *sock; if (optlen < 0) return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_setsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, optlen); else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, char __user *, optval, int __user *, optlen) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Shutdown a socket. */ SYSCALL_DEFINE2(shutdown, int, fd, int, how) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_shutdown(sock, how); if (!err) err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } return err; } /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. */ #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) struct used_address { struct sockaddr_storage name; unsigned int name_len; }; static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct sockaddr_storage address; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; int err, ctl_len, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* This will also move the address data into kernel space */ if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ); } else err = verify_iovec(msg_sys, iov, &address, VERIFY_READ); if (err < 0) goto out_freeiov; total_len = err; err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) goto out_freeiov; ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { err = cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) goto out_freeiov; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) goto out_freeiov; } err = -EFAULT; /* * Careful! Before this, msg_sys->msg_control contains a user pointer. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted * checking falls down on this. */ if (copy_from_user(ctl_buf, (void __user __force *)msg_sys->msg_control, ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; } msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) msg_sys->msg_flags |= MSG_DONTWAIT; /* * If this is sendmmsg() and current destination address is same as * previously succeeded address, omit asking LSM's decision. * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ if (used_address && msg_sys->msg_name && used_address->name_len == msg_sys->msg_namelen && !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; } err = sock_sendmsg(sock, msg_sys, total_len); /* * If this is sendmmsg() and sending to current destination address was * successful, remember it. 
*/ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; if (msg_sys->msg_name) memcpy(&used_address->name, msg_sys->msg_name, used_address->name_len); } out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD sendmsg interface */ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } /* * Linux sendmmsg interface */ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct used_address used_address; if (vlen > UIO_MAXIOV) vlen = UIO_MAXIOV; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; used_address.name_len = UINT_MAX; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; err = 0; while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_sendmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; } fput_light(sock->file, fput_needed); /* We only return an error if no datagrams were able to be sent */ if (datagrams != 0) return datagrams; return err; } SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; if (msg_sys->msg_iovlen > UIO_FASTIOV) { err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; err = -ENOMEM; iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user(&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) kfree(iov); out: return err; } /* * BSD recvmsg interface */ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[6]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. */ if (copy_from_user(a, args, len)) return -EFAULT; audit_socketcall(nargs[call] / sizeof(unsigned long), a); a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its 
address family, and have it linked into the * socket interface. The value ops->family corresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from the * new socket creation. * * If the protocol handler is a module, then it can use module reference * counts to protect against new references. If the protocol handler is not * a module, then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize the network sysctl infrastructure. */ err = net_sysctl_init(); if (err) goto out; /* * Initialize sock SLAB cache. */ sk_init(); /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. */ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER netfilter_init(); #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way.
8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. */ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. 
*/ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. */ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -ENOIOCTLCMD; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. 
*/ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= __get_user(r4.rt_window, &(ur4->rt_window)); ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= __get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style bridge ioctls end up using SIOCDEVPRIVATE * for some operations, this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR:
case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 0); } EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int __user *uoptlen; int err; uoptval = (char __user __force *) optval; uoptlen = (int __user __force *) optlen; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); else err = sock->ops->getsockopt(sock, level, optname, uoptval, uoptlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int err; uoptval = (char __user __force *) optval; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, uoptval, optlen); else err = sock->ops->setsockopt(sock, 
level, optname, uoptval, optlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { sock_update_classid(sock->sk); if (sock->ops->sendpage) return sock->ops->sendpage(sock, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); int err; set_fs(KERNEL_DS); err = sock->ops->ioctl(sock, cmd, arg); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown);
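/*
 * Illustrative sketch (not part of net/socket.c): how in-kernel code might
 * drive the kernel_*() helpers exported above to open a TCP listener
 * without going through the syscall layer.  It assumes the usual socket
 * headers (linux/net.h, linux/in.h) and the four-argument
 * sock_create_kern() of this kernel generation; the port number is made
 * up for the example.  Treat this as a hedged sketch, not a recipe.
 */
static int example_kernel_listener(struct socket **listener)
{
	struct sockaddr_in sin;
	struct socket *sock;
	int err;

	/* Allocate a kernel-owned socket; no file descriptor is attached. */
	err = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0)
		return err;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(12345);	/* hypothetical port */

	/* kernel_bind()/kernel_listen() call sock->ops directly. */
	err = kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (!err)
		err = kernel_listen(sock, 8);
	if (err < 0) {
		sock_release(sock);
		return err;
	}

	/* Connections would later be taken off with kernel_accept(). */
	*listener = sock;
	return 0;
}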
./CrossVul/dataset_final_sorted/CWE-399/c/good_3763_0
crossvul-cpp_data_bad_3486_17
/* * arch/s390/mm/fault.c * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner (hp@de.ibm.com) * Ulrich Weigand (uweigand@de.ibm.com) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/kernel_stat.h> #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/compat.h> #include <linux/smp.h> #include <linux/kdebug.h> #include <linux/init.h> #include <linux/console.h> #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/compat.h> #include "../kernel/entry.h" #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL #else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL #endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 #define VM_FAULT_BADACCESS 0x040000 static unsigned long store_indication; void fault_init(void) { if (test_facility(2) && test_facility(75)) store_indication = 0xc00; } static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; preempt_enable(); } return ret; } /* * Unlock any spinlocks which will prevent us from getting the * message out. */ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; } else { int loglevel_save = console_loglevel; console_unblank(); oops_in_progress = 0; /* * OK, the message is on the console. Now we call printk() * without oops_in_progress set so that printk will give klogd * a poke. Hold onto your hats... */ console_loglevel = 15; printk(" "); console_loglevel = loglevel_save; } } /* * Returns the address space associated with the fault. * Returns 0 for kernel space and 1 for user space. */ static inline int user_space_fault(unsigned long trans_exc_code) { /* * The lowest two bits of the translation exception * identification indicate which paging table was used. */ trans_exc_code &= 3; if (trans_exc_code == 2) /* Access via secondary space, set_fs setting decides */ return current->thread.mm_segment.ar4; if (user_mode == HOME_SPACE_MODE) /* User space if the access has been done via home space. */ return trans_exc_code == 3; /* * If the user space is not the home space the kernel runs in home * space. Access via secondary space has already been covered, * access via primary space or access register is from user space * and access via home space is from the kernel. 
*/ return trans_exc_code != 3; } static inline void report_user_fault(struct pt_regs *regs, long int_code, int signr, unsigned long address) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; if (!unhandled_signal(current, signr)) return; if (!printk_ratelimit()) return; printk("User process fault: interruption code 0x%lX ", int_code); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk("\n"); printk("failing address: %lX\n", address); show_regs(regs); } /* * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. */ static noinline void do_sigsegv(struct pt_regs *regs, long int_code, int si_code, unsigned long trans_exc_code) { struct siginfo si; unsigned long address; address = trans_exc_code & __FAIL_ADDR_MASK; current->thread.prot_addr = address; current->thread.trap_no = int_code; report_user_fault(regs, int_code, SIGSEGV, address); si.si_signo = SIGSEGV; si.si_code = si_code; si.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &si, current); } static noinline void do_no_context(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { const struct exception_table_entry *fixup; unsigned long address; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) { regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; return; } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ address = trans_exc_code & __FAIL_ADDR_MASK; if (!user_space_fault(trans_exc_code)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); die("Oops", regs, int_code); do_exit(SIGKILL); } static noinline void do_low_address(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ die ("Low-address protection", regs, int_code); do_exit(SIGKILL); } do_no_context(regs, int_code, trans_exc_code); } static noinline void do_sigbus(struct pt_regs *regs, long int_code, unsigned long trans_exc_code) { struct task_struct *tsk = current; unsigned long address; struct siginfo si; /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ address = trans_exc_code & __FAIL_ADDR_MASK; tsk->thread.prot_addr = address; tsk->thread.trap_no = int_code; si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; si.si_addr = (void __user *) address; force_sig_info(SIGBUS, &si, tsk); } static noinline void do_fault_error(struct pt_regs *regs, long int_code, unsigned long trans_exc_code, int fault) { int si_code; switch (fault) { case VM_FAULT_BADACCESS: case VM_FAULT_BADMAP: /* Bad memory access. Check if it is kernel or user space. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* User mode accesses just cause a SIGSEGV */ si_code = (fault == VM_FAULT_BADMAP) ? 
SEGV_MAPERR : SEGV_ACCERR; do_sigsegv(regs, int_code, si_code, trans_exc_code); return; } case VM_FAULT_BADCONTEXT: do_no_context(regs, int_code, trans_exc_code); break; default: /* fault & VM_FAULT_ERROR */ if (fault & VM_FAULT_OOM) { if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs, int_code, trans_exc_code); else pagefault_out_of_memory(); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs, int_code, trans_exc_code); else do_sigbus(regs, int_code, trans_exc_code); } else BUG(); break; } } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * interruption code (int_code): * 04 Protection -> Write-Protection (suppression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification) */ static inline int do_exception(struct pt_regs *regs, int access, unsigned long trans_exc_code) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long address; unsigned int flags; int fault; if (notify_page_fault(regs)) return 0; tsk = current; mm = tsk->mm; /* * Verify that the fault happened in user space, that * we are not in an interrupt and that there is a * user context. */ fault = VM_FAULT_BADCONTEXT; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; retry: down_read(&mm->mmap_sem); fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) goto out_up; if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; if (expand_stack(vma, address)) goto out_up; } /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ fault = VM_FAULT_BADACCESS; if (unlikely(!(vma->vm_flags & access))) goto out_up; if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; /* * Major/minor page fault accounting is only done on the * initial attempt. If we go through a retry, it is extremely * likely that the page will be found in page cache at that point. */ if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address); } else { tsk->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address); } if (fault & VM_FAULT_RETRY) { /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; goto retry; } } /* * The instruction that caused the program check will * be repeated. Don't signal single step via SIGTRAP. */ clear_tsk_thread_flag(tsk, TIF_PER_TRAP); fault = 0; out_up: up_read(&mm->mmap_sem); out: return fault; } void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { int fault; /* Protection exception is suppressing, decrement psw address. */ regs->psw.addr -= (pgm_int_code >> 16); /* * Check for low-address protection.
This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ if (unlikely(!(trans_exc_code & 4))) { do_low_address(regs, pgm_int_code, trans_exc_code); return; } fault = do_exception(regs, VM_WRITE, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, 4, trans_exc_code, fault); } void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { int access, fault; access = VM_READ | VM_EXEC | VM_WRITE; fault = do_exception(regs, access, trans_exc_code); if (unlikely(fault)) do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); } #ifdef CONFIG_64BIT void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, unsigned long trans_exc_code) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); up_read(&mm->mmap_sem); if (vma) { update_mm(mm, current); return; } /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); return; } no_context: do_no_context(regs, pgm_int_code, trans_exc_code); } #endif int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) { struct pt_regs regs; int access, fault; regs.psw.mask = psw_kernel_bits; if (!irqs_disabled()) regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; regs.psw.addr = (unsigned long) __builtin_return_address(0); regs.psw.addr |= PSW_ADDR_AMODE; uaddr &= PAGE_MASK; access = write ? VM_WRITE : VM_READ; fault = do_exception(&regs, access, uaddr | 2); if (unlikely(fault)) { if (fault & VM_FAULT_OOM) return -EFAULT; else if (fault & VM_FAULT_SIGBUS) do_sigbus(&regs, pgm_int_code, uaddr); } return fault ? -EFAULT : 0; } #ifdef CONFIG_PFAULT /* * 'pfault' pseudo page faults routines. */ static int pfault_disable; static int __init nopfault(char *str) { pfault_disable = 1; return 1; } __setup("nopfault", nopfault); struct pfault_refbk { u16 refdiagc; u16 reffcode; u16 refdwlen; u16 refversn; u64 refgaddr; u64 refselmk; u64 refcmpmk; u64 reserved; } __attribute__ ((packed, aligned(8))); int pfault_init(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 0, .refdwlen = 5, .refversn = 2, .refgaddr = __LC_CURRENT_PID, .refselmk = 1ULL << 48, .refcmpmk = 1ULL << 48, .reserved = __PF_RES_FIELD }; int rc; if (!MACHINE_IS_VM || pfault_disable) return -1; asm volatile( " diag %1,%0,0x258\n" "0: j 2f\n" "1: la %0,8\n" "2:\n" EX_TABLE(0b,1b) : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); return rc; } void pfault_fini(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 1, .refdwlen = 5, .refversn = 2, }; if (!MACHINE_IS_VM || pfault_disable) return; asm volatile( " diag %0,0,0x258\n" "0:\n" EX_TABLE(0b,0b) : : "a" (&refbk), "m" (refbk) : "cc"); } static DEFINE_SPINLOCK(pfault_lock); static LIST_HEAD(pfault_list); static void pfault_interrupt(unsigned int ext_int_code, unsigned int param32, unsigned long param64) { struct task_struct *tsk; __u16 subcode; pid_t pid; /* * Get the external interruption subcode & pfault * initial/completion signal bit. VM stores this * in the 'cpu address' field associated with the * external interrupt. 
*/ subcode = ext_int_code >> 16; if ((subcode & 0xff00) != __SUBCODE_MASK) return; kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; if (subcode & 0x0080) { /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? param32 : param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) get_task_struct(tsk); rcu_read_unlock(); if (!tsk) return; } else { tsk = current; } spin_lock(&pfault_lock); if (subcode & 0x0080) { /* signal bit is set -> a page has been swapped in by VM */ if (tsk->thread.pfault_wait == 1) { /* Initial interrupt was faster than the completion * interrupt. pfault_wait is valid. Set pfault_wait * back to zero and wake up the process. This can * safely be done because the task is still sleeping * and can't produce new pfaults. */ tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial * interrupt doesn't put the task to sleep. */ tsk->thread.pfault_wait = -1; } put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion * interrupt. Let the task sleep. */ tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } } spin_unlock(&pfault_lock); } static int __cpuinit pfault_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct thread_struct *thread, *next; struct task_struct *tsk; switch (action) { case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irq(&pfault_lock); list_for_each_entry_safe(thread, next, &pfault_list, list) { thread->pfault_wait = 0; list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); } spin_unlock_irq(&pfault_lock); break; default: break; } return NOTIFY_OK; } static int __init pfault_irq_init(void) { int rc; if (!MACHINE_IS_VM) return 0; rc = register_external_interrupt(0x2603, pfault_interrupt); if (rc) goto out_extint; rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; if (rc) goto out_pfault; service_subclass_irq_register(); hotcpu_notifier(pfault_cpu_notify, 0); return 0; out_pfault: unregister_external_interrupt(0x2603, pfault_interrupt); out_extint: pfault_disable = 1; return rc; } early_initcall(pfault_irq_init); #endif /* CONFIG_PFAULT */
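/*
 * Illustrative sketch (not part of arch/s390/mm/fault.c): a stand-alone
 * user-space model of the pfault_wait handshake that pfault_interrupt()
 * implements above.  It only shows how the three states (0 = idle,
 * 1 = initial interrupt seen, -1 = completion arrived first) resolve for
 * either ordering of the initial and completion interrupts; the function
 * names are made up for the example and nothing here is kernel API.
 */
#include <stdio.h>

static int pfault_wait_state;	/* models tsk->thread.pfault_wait */

static const char *handle_initial(void)	/* "a real page is missing" */
{
	if (pfault_wait_state == -1) {
		pfault_wait_state = 0;	/* completion already seen: don't sleep */
		return "completion won the race, task keeps running";
	}
	pfault_wait_state = 1;		/* normal case: put the task to sleep */
	return "task sleeps until the completion interrupt arrives";
}

static const char *handle_completion(void)	/* "page swapped in by VM" */
{
	if (pfault_wait_state == 1) {
		pfault_wait_state = 0;	/* normal case: wake the sleeping task */
		return "task is woken up";
	}
	pfault_wait_state = -1;		/* completion beat the initial interrupt */
	return "remember the completion so the task never sleeps";
}

int main(void)
{
	/* Ordering 1: initial interrupt first (the common case). */
	pfault_wait_state = 0;
	printf("initial:    %s\n", handle_initial());
	printf("completion: %s\n", handle_completion());

	/* Ordering 2: the completion interrupt overtakes the initial one. */
	pfault_wait_state = 0;
	printf("completion: %s\n", handle_completion());
	printf("initial:    %s\n", handle_initial());
	return 0;
}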
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_17
crossvul-cpp_data_bad_3640_0
/* * hugetlbpage-backed filesystem. Based on ramfs. * * William Irwin, 2002 * * Copyright (C) 2002 Linus Torvalds. */ #include <linux/module.h> #include <linux/thread_info.h> #include <asm/current.h> #include <linux/sched.h> /* remove ASAP */ #include <linux/fs.h> #include <linux/mount.h> #include <linux/file.h> #include <linux/kernel.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/string.h> #include <linux/capability.h> #include <linux/ctype.h> #include <linux/backing-dev.h> #include <linux/hugetlb.h> #include <linux/pagevec.h> #include <linux/parser.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/dnotify.h> #include <linux/statfs.h> #include <linux/security.h> #include <linux/magic.h> #include <linux/migrate.h> #include <asm/uaccess.h> static const struct super_operations hugetlbfs_ops; static const struct address_space_operations hugetlbfs_aops; const struct file_operations hugetlbfs_file_operations; static const struct inode_operations hugetlbfs_dir_inode_operations; static const struct inode_operations hugetlbfs_inode_operations; struct hugetlbfs_config { uid_t uid; gid_t gid; umode_t mode; long nr_blocks; long nr_inodes; struct hstate *hstate; }; struct hugetlbfs_inode_info { struct shared_policy policy; struct inode vfs_inode; }; static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) { return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); } static struct backing_dev_info hugetlbfs_backing_dev_info = { .name = "hugetlbfs", .ra_pages = 0, /* No readahead */ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, }; int sysctl_hugetlb_shm_group; enum { Opt_size, Opt_nr_inodes, Opt_mode, Opt_uid, Opt_gid, Opt_pagesize, Opt_err, }; static const match_table_t tokens = { {Opt_size, "size=%s"}, {Opt_nr_inodes, "nr_inodes=%s"}, {Opt_mode, "mode=%o"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_pagesize, "pagesize=%s"}, {Opt_err, NULL}, }; static void huge_pagevec_release(struct pagevec *pvec) { int i; for (i = 0; i < pagevec_count(pvec); ++i) put_page(pvec->pages[i]); pagevec_reinit(pvec); } static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_path.dentry->d_inode; loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); /* * vma address alignment (but not the pgoff alignment) has * already been checked by prepare_hugepage_range. If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right * way when do_mmap_pgoff unwinds (may be important on powerpc * and ia64). */ vma->vm_flags |= VM_HUGETLB | VM_RESERVED; vma->vm_ops = &hugetlb_vm_ops; if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); mutex_lock(&inode->i_mutex); file_accessed(file); ret = -ENOMEM; len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); if (hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h), len >> huge_page_shift(h), vma, vma->vm_flags)) goto out; ret = 0; hugetlb_prefault_arch_hook(vma->vm_mm); if (vma->vm_flags & VM_WRITE && inode->i_size < len) inode->i_size = len; out: mutex_unlock(&inode->i_mutex); return ret; } /* * Called under down_write(mmap_sem). 
*/ #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA static unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; struct hstate *h = hstate_file(file); if (len & ~huge_page_mask(h)) return -EINVAL; if (len > TASK_SIZE) return -ENOMEM; if (flags & MAP_FIXED) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } if (len > mm->cached_hole_size) start_addr = mm->free_area_cache; else { start_addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; } full_search: addr = ALIGN(start_addr, huge_page_size(h)); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) { /* * Start a new search - just in case we missed * some holes. */ if (start_addr != TASK_UNMAPPED_BASE) { start_addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { mm->free_area_cache = addr + len; return addr; } if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = ALIGN(vma->vm_end, huge_page_size(h)); } } #endif static int hugetlbfs_read_actor(struct page *page, unsigned long offset, char __user *buf, unsigned long count, unsigned long size) { char *kaddr; unsigned long left, copied = 0; int i, chunksize; if (size > count) size = count; /* Find which 4k chunk and offset within that chunk */ i = offset >> PAGE_CACHE_SHIFT; offset = offset & ~PAGE_CACHE_MASK; while (size) { chunksize = PAGE_CACHE_SIZE; if (offset) chunksize -= offset; if (chunksize > size) chunksize = size; kaddr = kmap(&page[i]); left = __copy_to_user(buf, kaddr + offset, chunksize); kunmap(&page[i]); if (left) { copied += (chunksize - left); break; } offset = 0; size -= chunksize; buf += chunksize; copied += chunksize; i++; } return copied ? copied : -EFAULT; } /* * Support for read() - Find the page attached to f_mapping and copy out the * data. It's *very* similar to do_generic_mapping_read(); we can't use that * since it has PAGE_CACHE_SIZE assumptions. */ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { struct hstate *h = hstate_file(filp); struct address_space *mapping = filp->f_mapping; struct inode *inode = mapping->host; unsigned long index = *ppos >> huge_page_shift(h); unsigned long offset = *ppos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; /* validate length */ if (len == 0) goto out; for (;;) { struct page *page; unsigned long nr, ret; int ra; /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); isize = i_size_read(inode); if (!isize) goto out; end_index = (isize - 1) >> huge_page_shift(h); if (index >= end_index) { if (index > end_index) goto out; nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) goto out; } nr = nr - offset; /* Find the page */ page = find_lock_page(mapping, index); if (unlikely(page == NULL)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ ret = len < nr ? len : nr; if (clear_user(buf, ret)) ra = -EFAULT; else ra = 0; } else { unlock_page(page); /* * We have the page, copy it to user space buffer.
*/ ra = hugetlbfs_read_actor(page, offset, buf, len, nr); ret = ra; page_cache_release(page); } if (ra < 0) { if (retval == 0) retval = ra; goto out; } offset += ret; retval += ret; len -= ret; index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); /* short read or no more work */ if ((ret != nr) || (len == 0)) break; } out: *ppos = ((loff_t)index << huge_page_shift(h)) + offset; return retval; } static int hugetlbfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { return -EINVAL; } static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { BUG(); return -EINVAL; } static void truncate_huge_page(struct page *page) { cancel_dirty_page(page, /* No IO accounting for huge pages? */0); ClearPageUptodate(page); delete_from_page_cache(page); } static void truncate_hugepages(struct inode *inode, loff_t lstart) { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; const pgoff_t start = lstart >> huge_page_shift(h); struct pagevec pvec; pgoff_t next; int i, freed = 0; pagevec_init(&pvec, 0); next = start; while (1) { if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { if (next == start) break; next = start; continue; } for (i = 0; i < pagevec_count(&pvec); ++i) { struct page *page = pvec.pages[i]; lock_page(page); if (page->index > next) next = page->index; ++next; truncate_huge_page(page); unlock_page(page); freed++; } huge_pagevec_release(&pvec); } BUG_ON(!lstart && mapping->nrpages); hugetlb_unreserve_pages(inode, start, freed); } static void hugetlbfs_evict_inode(struct inode *inode) { truncate_hugepages(inode, 0); end_writeback(inode); } static inline void hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff) { struct vm_area_struct *vma; struct prio_tree_iter iter; vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) { unsigned long v_offset; /* * Can the expression below overflow on 32-bit arches? * No, because the prio_tree returns us only those vmas * which overlap the truncated area starting at pgoff, * and no vma on a 32-bit arch can span beyond the 4GB. 
*/ if (vma->vm_pgoff < pgoff) v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT; else v_offset = 0; __unmap_hugepage_range(vma, vma->vm_start + v_offset, vma->vm_end, NULL); } } static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); BUG_ON(offset & ~huge_page_mask(h)); pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); mutex_lock(&mapping->i_mmap_mutex); if (!prio_tree_empty(&mapping->i_mmap)) hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); mutex_unlock(&mapping->i_mmap_mutex); truncate_hugepages(inode, offset); return 0; } static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct hstate *h = hstate_inode(inode); int error; unsigned int ia_valid = attr->ia_valid; BUG_ON(!inode); error = inode_change_ok(inode, attr); if (error) return error; if (ia_valid & ATTR_SIZE) { error = -EINVAL; if (attr->ia_size & ~huge_page_mask(h)) return -EINVAL; error = hugetlb_vmtruncate(inode, attr->ia_size); if (error) return error; } setattr_copy(inode, attr); mark_inode_dirty(inode); return 0; } static struct inode *hugetlbfs_get_root(struct super_block *sb, struct hugetlbfs_config *config) { struct inode *inode; inode = new_inode(sb); if (inode) { struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode->i_mode = S_IFDIR | config->mode; inode->i_uid = config->uid; inode->i_gid = config->gid; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; info = HUGETLBFS_I(inode); mpol_shared_policy_init(&info->policy, NULL); inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); } return inode; } static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev) { struct inode *inode; inode = new_inode(sb); if (inode) { struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_mapping->a_ops = &hugetlbfs_aops; inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; INIT_LIST_HEAD(&inode->i_mapping->private_list); info = HUGETLBFS_I(inode); /* * The policy is initialized here even if we are creating a * private inode because initialization simply creates an * an empty rb tree and calls spin_lock_init(), later when we * call mpol_free_shared_policy() it will just return because * the rb tree will still be empty. */ mpol_shared_policy_init(&info->policy, NULL); switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); break; case S_IFREG: inode->i_op = &hugetlbfs_inode_operations; inode->i_fop = &hugetlbfs_file_operations; break; case S_IFDIR: inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; break; } lockdep_annotate_inode_mutex_key(inode); } return inode; } /* * File creation. Allocate an inode, and we're done.. 
*/ static int hugetlbfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; int error = -ENOSPC; inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); if (inode) { dir->i_ctime = dir->i_mtime = CURRENT_TIME; d_instantiate(dentry, inode); dget(dentry); /* Extra count - pin the dentry in core */ error = 0; } return error; } static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); return retval; } static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0); } static int hugetlbfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; int error = -ENOSPC; inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); if (!error) { d_instantiate(dentry, inode); dget(dentry); } else iput(inode); } dir->i_ctime = dir->i_mtime = CURRENT_TIME; return error; } /* * mark the head page dirty */ static int hugetlbfs_set_page_dirty(struct page *page) { struct page *head = compound_head(page); SetPageDirty(head); return 0; } static int hugetlbfs_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) { int rc; rc = migrate_huge_page_move_mapping(mapping, newpage, page); if (rc) return rc; migrate_page_copy(newpage, page); return 0; } static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); struct hstate *h = hstate_inode(dentry->d_inode); buf->f_type = HUGETLBFS_MAGIC; buf->f_bsize = huge_page_size(h); if (sbinfo) { spin_lock(&sbinfo->stat_lock); /* If no limits set, just report 0 for max/free/used * blocks, like simple_statfs() */ if (sbinfo->max_blocks >= 0) { buf->f_blocks = sbinfo->max_blocks; buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_inodes; } spin_unlock(&sbinfo->stat_lock); } buf->f_namelen = NAME_MAX; return 0; } static void hugetlbfs_put_super(struct super_block *sb) { struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); if (sbi) { sb->s_fs_info = NULL; kfree(sbi); } } static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); if (unlikely(!sbinfo->free_inodes)) { spin_unlock(&sbinfo->stat_lock); return 0; } sbinfo->free_inodes--; spin_unlock(&sbinfo->stat_lock); } return 1; } static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); sbinfo->free_inodes++; spin_unlock(&sbinfo->stat_lock); } } static struct kmem_cache *hugetlbfs_inode_cachep; static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); struct hugetlbfs_inode_info *p; if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; } return &p->vfs_inode; } static void hugetlbfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); } static void hugetlbfs_destroy_inode(struct inode *inode) { 
hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); call_rcu(&inode->i_rcu, hugetlbfs_i_callback); } static const struct address_space_operations hugetlbfs_aops = { .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, .set_page_dirty = hugetlbfs_set_page_dirty, .migratepage = hugetlbfs_migrate_page, }; static void init_once(void *foo) { struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; inode_init_once(&ei->vfs_inode); } const struct file_operations hugetlbfs_file_operations = { .read = hugetlbfs_read, .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, .llseek = default_llseek, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { .create = hugetlbfs_create, .lookup = simple_lookup, .link = simple_link, .unlink = simple_unlink, .symlink = hugetlbfs_symlink, .mkdir = hugetlbfs_mkdir, .rmdir = simple_rmdir, .mknod = hugetlbfs_mknod, .rename = simple_rename, .setattr = hugetlbfs_setattr, }; static const struct inode_operations hugetlbfs_inode_operations = { .setattr = hugetlbfs_setattr, }; static const struct super_operations hugetlbfs_ops = { .alloc_inode = hugetlbfs_alloc_inode, .destroy_inode = hugetlbfs_destroy_inode, .evict_inode = hugetlbfs_evict_inode, .statfs = hugetlbfs_statfs, .put_super = hugetlbfs_put_super, .show_options = generic_show_options, }; static int hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) { char *p, *rest; substring_t args[MAX_OPT_ARGS]; int option; unsigned long long size = 0; enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE; if (!options) return 0; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) goto bad_val; pconfig->uid = option; break; case Opt_gid: if (match_int(&args[0], &option)) goto bad_val; pconfig->gid = option; break; case Opt_mode: if (match_octal(&args[0], &option)) goto bad_val; pconfig->mode = option & 01777U; break; case Opt_size: { /* memparse() will accept a K/M/G without a digit */ if (!isdigit(*args[0].from)) goto bad_val; size = memparse(args[0].from, &rest); setsize = SIZE_STD; if (*rest == '%') setsize = SIZE_PERCENT; break; } case Opt_nr_inodes: /* memparse() will accept a K/M/G without a digit */ if (!isdigit(*args[0].from)) goto bad_val; pconfig->nr_inodes = memparse(args[0].from, &rest); break; case Opt_pagesize: { unsigned long ps; ps = memparse(args[0].from, &rest); pconfig->hstate = size_to_hstate(ps); if (!pconfig->hstate) { printk(KERN_ERR "hugetlbfs: Unsupported page size %lu MB\n", ps >> 20); return -EINVAL; } break; } default: printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n", p); return -EINVAL; break; } } /* Do size after hstate is set up */ if (setsize > NO_SIZE) { struct hstate *h = pconfig->hstate; if (setsize == SIZE_PERCENT) { size <<= huge_page_shift(h); size *= h->max_huge_pages; do_div(size, 100); } pconfig->nr_blocks = (size >> huge_page_shift(h)); } return 0; bad_val: printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n", args[0].from, p); return -EINVAL; } static int hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode * inode; struct dentry * root; int ret; struct hugetlbfs_config config; struct hugetlbfs_sb_info *sbinfo; save_mount_options(sb, data); config.nr_blocks = -1; /* No limit on size by default */ config.nr_inodes = -1; /* No limit 
on number of inodes by default */ config.uid = current_fsuid(); config.gid = current_fsgid(); config.mode = 0755; config.hstate = &default_hstate; ret = hugetlbfs_parse_options(data, &config); if (ret) return ret; sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); if (!sbinfo) return -ENOMEM; sb->s_fs_info = sbinfo; sbinfo->hstate = config.hstate; spin_lock_init(&sbinfo->stat_lock); sbinfo->max_blocks = config.nr_blocks; sbinfo->free_blocks = config.nr_blocks; sbinfo->max_inodes = config.nr_inodes; sbinfo->free_inodes = config.nr_inodes; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = huge_page_size(config.hstate); sb->s_blocksize_bits = huge_page_shift(config.hstate); sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; inode = hugetlbfs_get_root(sb, &config); if (!inode) goto out_free; root = d_alloc_root(inode); if (!root) { iput(inode); goto out_free; } sb->s_root = root; return 0; out_free: kfree(sbinfo); return -ENOMEM; } int hugetlb_get_quota(struct address_space *mapping, long delta) { int ret = 0; struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); if (sbinfo->free_blocks > -1) { spin_lock(&sbinfo->stat_lock); if (sbinfo->free_blocks - delta >= 0) sbinfo->free_blocks -= delta; else ret = -ENOMEM; spin_unlock(&sbinfo->stat_lock); } return ret; } void hugetlb_put_quota(struct address_space *mapping, long delta) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); if (sbinfo->free_blocks > -1) { spin_lock(&sbinfo->stat_lock); sbinfo->free_blocks += delta; spin_unlock(&sbinfo->stat_lock); } } static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super); } static struct file_system_type hugetlbfs_fs_type = { .name = "hugetlbfs", .mount = hugetlbfs_mount, .kill_sb = kill_litter_super, }; static struct vfsmount *hugetlbfs_vfsmount; static int can_do_hugetlb_shm(void) { return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); } struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, struct user_struct **user, int creat_flags) { int error = -ENOMEM; struct file *file; struct inode *inode; struct path path; struct dentry *root; struct qstr quick_string; *user = NULL; if (!hugetlbfs_vfsmount) return ERR_PTR(-ENOENT); if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { *user = current_user(); if (user_shm_lock(size, *user)) { printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n"); } else { *user = NULL; return ERR_PTR(-EPERM); } } root = hugetlbfs_vfsmount->mnt_root; quick_string.name = name; quick_string.len = strlen(quick_string.name); quick_string.hash = 0; path.dentry = d_alloc(root, &quick_string); if (!path.dentry) goto out_shm_unlock; path.mnt = mntget(hugetlbfs_vfsmount); error = -ENOSPC; inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out_dentry; error = -ENOMEM; if (hugetlb_reserve_pages(inode, 0, size >> huge_page_shift(hstate_inode(inode)), NULL, acctflag)) goto out_inode; d_instantiate(path.dentry, inode); inode->i_size = size; clear_nlink(inode); error = -ENFILE; file = alloc_file(&path, FMODE_WRITE | FMODE_READ, &hugetlbfs_file_operations); if (!file) goto out_dentry; /* inode is already attached */ return file; out_inode: iput(inode); out_dentry: path_put(&path); out_shm_unlock: if (*user) { user_shm_unlock(size, *user); *user = NULL; } return ERR_PTR(error); 
} static int __init init_hugetlbfs_fs(void) { int error; struct vfsmount *vfsmount; error = bdi_init(&hugetlbfs_backing_dev_info); if (error) return error; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), 0, 0, init_once); if (hugetlbfs_inode_cachep == NULL) goto out2; error = register_filesystem(&hugetlbfs_fs_type); if (error) goto out; vfsmount = kern_mount(&hugetlbfs_fs_type); if (!IS_ERR(vfsmount)) { hugetlbfs_vfsmount = vfsmount; return 0; } error = PTR_ERR(vfsmount); out: if (error) kmem_cache_destroy(hugetlbfs_inode_cachep); out2: bdi_destroy(&hugetlbfs_backing_dev_info); return error; } static void __exit exit_hugetlbfs_fs(void) { kmem_cache_destroy(hugetlbfs_inode_cachep); kern_unmount(hugetlbfs_vfsmount); unregister_filesystem(&hugetlbfs_fs_type); bdi_destroy(&hugetlbfs_backing_dev_info); } module_init(init_hugetlbfs_fs) module_exit(exit_hugetlbfs_fs) MODULE_LICENSE("GPL");
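/*
 * Illustrative userspace sketch (not part of the kernel file above): mount a
 * hugetlbfs instance and map one huge page from it, exercising the
 * hugetlbfs_parse_options()/hugetlbfs_fill_super() mount path and the
 * hugetlbfs_file_mmap() mapping path. The mount point "/mnt/huge", the option
 * string and the 2 MB page size are assumptions for the example; it needs to
 * run as root on a kernel with CONFIG_HUGETLBFS and 2 MB huge pages reserved.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2 * 1024 * 1024;	/* one 2 MB huge page (assumed) */
	char *p;
	int fd;

	/* The option string maps onto the Opt_* tokens parsed above. */
	if (mount("none", "/mnt/huge", "hugetlbfs", 0,
		  "size=64M,nr_inodes=16,mode=1777,pagesize=2M") < 0) {
		perror("mount");
		return 1;
	}

	fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Length and offset must be multiples of the huge page size; for a
	 * writable shared mapping hugetlbfs_file_mmap() grows i_size to
	 * cover the mapped range.
	 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);	/* touch the mapping so a huge page is faulted in */

	munmap(p, len);
	close(fd);
	return 0;
}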
/* source path: ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3640_0 */
/* sample id: crossvul-cpp_data_good_5352_0 */
/* mpi-pow.c - MPI functions * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * Note: This code is heavily based on the GNU MP Library. * Actually it's the same code with only minor changes in the * way the data is stored; this is to support the abstraction * of an optional secure memory allocation which may be used * to avoid revealing of sensitive data due to paging etc. * The GNU MP Library itself is published under the LGPL; * however I decided to publish this code under the plain GPL. */ #include <linux/string.h> #include "mpi-internal.h" #include "longlong.h" /**************** * RES = BASE ^ EXP mod MOD */ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) { mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; mpi_ptr_t xp_marker = NULL; mpi_ptr_t tspace = NULL; mpi_ptr_t rp, ep, mp, bp; mpi_size_t esize, msize, bsize, rsize; int esign, msign, bsign, rsign; mpi_size_t size; int mod_shift_cnt; int negative_result; int assign_rp = 0; mpi_size_t tsize = 0; /* to avoid compiler warning */ /* fixme: we should check that the warning is void */ int rc = -ENOMEM; esize = exp->nlimbs; msize = mod->nlimbs; size = 2 * msize; esign = exp->sign; msign = mod->sign; rp = res->d; ep = exp->d; if (!msize) return -EINVAL; if (!esize) { /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 * depending on if MOD equals 1. */ res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; if (res->nlimbs) { if (mpi_resize(res, 1) < 0) goto enomem; rp = res->d; rp[0] = 1; } res->sign = 0; goto leave; } /* Normalize MOD (i.e. make its most significant bit set) as required by * mpn_divrem. This will make the intermediate values in the calculation * slightly larger, but the correct result is obtained after a final * reduction using the original MOD value. */ mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]); if (mod_shift_cnt) mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); else MPN_COPY(mp, mod->d, msize); bsize = base->nlimbs; bsign = base->sign; if (bsize > msize) { /* The base is larger than the module. Reduce it. */ /* Allocate (BSIZE + 1) with space for remainder and quotient. * (The quotient is (bsize - msize + 1) limbs.) */ bp = bp_marker = mpi_alloc_limb_space(bsize + 1); if (!bp) goto enomem; MPN_COPY(bp, base->d, bsize); /* We don't care about the quotient, store it above the remainder, * at BP + MSIZE. */ mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); bsize = msize; /* Canonicalize the base, since we are going to multiply with it * quite a few times. */ MPN_NORMALIZE(bp, bsize); } else bp = base->d; if (!bsize) { res->nlimbs = 0; res->sign = 0; goto leave; } if (res->alloced < size) { /* We have to allocate more space for RES. 
If any of the input * parameters are identical to RES, defer deallocation of the old * space. */ if (rp == ep || rp == mp || rp == bp) { rp = mpi_alloc_limb_space(size); if (!rp) goto enomem; assign_rp = 1; } else { if (mpi_resize(res, size) < 0) goto enomem; rp = res->d; } } else { /* Make BASE, EXP and MOD not overlap with RES. */ if (rp == bp) { /* RES and BASE are identical. Allocate temp. space for BASE. */ BUG_ON(bp_marker); bp = bp_marker = mpi_alloc_limb_space(bsize); if (!bp) goto enomem; MPN_COPY(bp, rp, bsize); } if (rp == ep) { /* RES and EXP are identical. Allocate temp. space for EXP. */ ep = ep_marker = mpi_alloc_limb_space(esize); if (!ep) goto enomem; MPN_COPY(ep, rp, esize); } if (rp == mp) { /* RES and MOD are identical. Allocate temporary space for MOD. */ BUG_ON(mp_marker); mp = mp_marker = mpi_alloc_limb_space(msize); if (!mp) goto enomem; MPN_COPY(mp, rp, msize); } } MPN_COPY(rp, bp, bsize); rsize = bsize; rsign = bsign; { mpi_size_t i; mpi_ptr_t xp; int c; mpi_limb_t e; mpi_limb_t carry_limb; struct karatsuba_ctx karactx; xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); if (!xp) goto enomem; memset(&karactx, 0, sizeof karactx); negative_result = (ep[0] & 1) && base->sign; i = esize - 1; e = ep[i]; c = count_leading_zeros(e); e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ c = BITS_PER_MPI_LIMB - 1 - c; /* Main loop. * * Make the result be pointed to alternately by XP and RP. This * helps us avoid block copying, which would otherwise be necessary * with the overlap restrictions of mpihelp_divmod. With 50% probability * the result after this loop will be in the area originally pointed * by RP (==RES->d), and with 50% probability in the area originally * pointed to by XP. */ for (;;) { while (c) { mpi_ptr_t tp; mpi_size_t xsize; /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */ if (rsize < KARATSUBA_THRESHOLD) mpih_sqr_n_basecase(xp, rp, rsize); else { if (!tspace) { tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } else if (tsize < (2 * rsize)) { mpi_free_limb_space(tspace); tsize = 2 * rsize; tspace = mpi_alloc_limb_space(tsize); if (!tspace) goto enomem; } mpih_sqr_n(xp, rp, rsize, tspace); } xsize = 2 * rsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; if ((mpi_limb_signed_t) e < 0) { /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ if (bsize < KARATSUBA_THRESHOLD) { mpi_limb_t tmp; if (mpihelp_mul (xp, rp, rsize, bp, bsize, &tmp) < 0) goto enomem; } else { if (mpihelp_mul_karatsuba_case (xp, rp, rsize, bp, bsize, &karactx) < 0) goto enomem; } xsize = rsize + bsize; if (xsize > msize) { mpihelp_divrem(xp + msize, 0, xp, xsize, mp, msize); xsize = msize; } tp = rp; rp = xp; xp = tp; rsize = xsize; } e <<= 1; c--; } i--; if (i < 0) break; e = ep[i]; c = BITS_PER_MPI_LIMB; } /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT * steps. Adjust the result by reducing it with the original MOD. * * Also make sure the result is put in RES->d (where it already * might be, see above). */ if (mod_shift_cnt) { carry_limb = mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt); rp = res->d; if (carry_limb) { rp[rsize] = carry_limb; rsize++; } } else { MPN_COPY(res->d, rp, rsize); rp = res->d; } if (rsize >= msize) { mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize); rsize = msize; } /* Remove any leading zero words from the result. 
*/ if (mod_shift_cnt) mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); MPN_NORMALIZE(rp, rsize); mpihelp_release_karatsuba_ctx(&karactx); } if (negative_result && rsize) { if (mod_shift_cnt) mpihelp_rshift(mp, mp, msize, mod_shift_cnt); mpihelp_sub(rp, mp, msize, rp, rsize); rsize = msize; rsign = msign; MPN_NORMALIZE(rp, rsize); } res->nlimbs = rsize; res->sign = rsign; leave: rc = 0; enomem: if (assign_rp) mpi_assign_limb_space(res, rp, size); if (mp_marker) mpi_free_limb_space(mp_marker); if (bp_marker) mpi_free_limb_space(bp_marker); if (ep_marker) mpi_free_limb_space(ep_marker); if (xp_marker) mpi_free_limb_space(xp_marker); if (tspace) mpi_free_limb_space(tspace); return rc; } EXPORT_SYMBOL_GPL(mpi_powm);
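/*
 * Illustrative sketch (not part of mpi-pow.c): the left-to-right binary
 * square-and-multiply method that the main loop above implements, reduced to
 * unsigned 64-bit operands so the control flow is easy to follow. The kernel
 * loop seeds the running result with the base and skips the exponent's top
 * bit; here the result starts at 1 and every bit is processed, which yields
 * the same value. The helper name is made up for the example, mod is assumed
 * nonzero and small enough (mod < 2^32) that the products fit in 64 bits;
 * mpi_powm() handles arbitrary-precision operands instead.
 */
static unsigned long long demo_powm_u64(unsigned long long base,
					unsigned long long exp,
					unsigned long long mod)
{
	unsigned long long result = 1 % mod;
	int bit;

	base %= mod;
	/* Find the most significant set bit of the exponent, if any. */
	for (bit = 63; bit >= 0 && !((exp >> bit) & 1); bit--)
		;
	/* Scan from that bit downward: square always, multiply on a 1 bit. */
	for (; bit >= 0; bit--) {
		result = (result * result) % mod;
		if ((exp >> bit) & 1)
			result = (result * base) % mod;
	}
	return result;
}
/* Example: demo_powm_u64(4, 13, 497) == 445, i.e. 4^13 mod 497. */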
/* source path: ./CrossVul/dataset_final_sorted/CWE-399/c/good_5352_0 */
/* sample id: crossvul-cpp_data_bad_1713_0 */
/* Copyright (C) 2009 Red Hat, Inc. * Copyright (C) 2006 Rusty Russell IBM Corporation * * Author: Michael S. Tsirkin <mst@redhat.com> * * Inspiration, some code, and most witty comments come from * Documentation/virtual/lguest/lguest.c, by Rusty Russell * * This work is licensed under the terms of the GNU GPL, version 2. * * Generic code for virtio server in host kernel. */ #include <linux/eventfd.h> #include <linux/vhost.h> #include <linux/uio.h> #include <linux/mm.h> #include <linux/mmu_context.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/kthread.h> #include <linux/cgroup.h> #include <linux/module.h> #include <linux/sort.h> #include "vhost.h" static ushort max_mem_regions = 64; module_param(max_mem_regions, ushort, 0444); MODULE_PARM_DESC(max_mem_regions, "Maximum number of memory regions in memory map. (default: 64)"); enum { VHOST_MEMORY_F_LOG = 0x1, }; #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq) { vq->user_be = !virtio_legacy_is_little_endian(); } static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) { struct vhost_vring_state s; if (vq->private_data) return -EBUSY; if (copy_from_user(&s, argp, sizeof(s))) return -EFAULT; if (s.num != VHOST_VRING_LITTLE_ENDIAN && s.num != VHOST_VRING_BIG_ENDIAN) return -EINVAL; vq->user_be = s.num; return 0; } static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, int __user *argp) { struct vhost_vring_state s = { .index = idx, .num = vq->user_be }; if (copy_to_user(argp, &s, sizeof(s))) return -EFAULT; return 0; } static void vhost_init_is_le(struct vhost_virtqueue *vq) { /* Note for legacy virtio: user_be is initialized at reset time * according to the host endianness. If userspace does not set an * explicit endianness, the default behavior is native endian, as * expected by legacy virtio. 
*/ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; } #else static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq) { } static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) { return -ENOIOCTLCMD; } static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, int __user *argp) { return -ENOIOCTLCMD; } static void vhost_init_is_le(struct vhost_virtqueue *vq) { if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) vq->is_le = true; } #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { struct vhost_poll *poll; poll = container_of(pt, struct vhost_poll, table); poll->wqh = wqh; add_wait_queue(wqh, &poll->wait); } static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); if (!((unsigned long)key & poll->mask)) return 0; vhost_poll_queue(poll); return 0; } void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) { INIT_LIST_HEAD(&work->node); work->fn = fn; init_waitqueue_head(&work->done); work->flushing = 0; work->queue_seq = work->done_seq = 0; } EXPORT_SYMBOL_GPL(vhost_work_init); /* Init poll structure */ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, unsigned long mask, struct vhost_dev *dev) { init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; poll->dev = dev; poll->wqh = NULL; vhost_work_init(&poll->work, fn); } EXPORT_SYMBOL_GPL(vhost_poll_init); /* Start polling a file. We add ourselves to file's wait queue. The caller must * keep a reference to a file until after vhost_poll_stop is called. */ int vhost_poll_start(struct vhost_poll *poll, struct file *file) { unsigned long mask; int ret = 0; if (poll->wqh) return 0; mask = file->f_op->poll(file, &poll->table); if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); if (mask & POLLERR) { if (poll->wqh) remove_wait_queue(poll->wqh, &poll->wait); ret = -EINVAL; } return ret; } EXPORT_SYMBOL_GPL(vhost_poll_start); /* Stop polling a file. After this function returns, it becomes safe to drop the * file reference. You must also flush afterwards. */ void vhost_poll_stop(struct vhost_poll *poll) { if (poll->wqh) { remove_wait_queue(poll->wqh, &poll->wait); poll->wqh = NULL; } } EXPORT_SYMBOL_GPL(vhost_poll_stop); static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, unsigned seq) { int left; spin_lock_irq(&dev->work_lock); left = seq - work->done_seq; spin_unlock_irq(&dev->work_lock); return left <= 0; } void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { unsigned seq; int flushing; spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; spin_unlock_irq(&dev->work_lock); wait_event(work->done, vhost_work_seq_done(dev, work, seq)); spin_lock_irq(&dev->work_lock); flushing = --work->flushing; spin_unlock_irq(&dev->work_lock); BUG_ON(flushing < 0); } EXPORT_SYMBOL_GPL(vhost_work_flush); /* Flush any work that has been scheduled. When calling this, don't hold any * locks that are also used by the callback. 
*/ void vhost_poll_flush(struct vhost_poll *poll) { vhost_work_flush(poll->dev, &poll->work); } EXPORT_SYMBOL_GPL(vhost_poll_flush); void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) { unsigned long flags; spin_lock_irqsave(&dev->work_lock, flags); if (list_empty(&work->node)) { list_add_tail(&work->node, &dev->work_list); work->queue_seq++; spin_unlock_irqrestore(&dev->work_lock, flags); wake_up_process(dev->worker); } else { spin_unlock_irqrestore(&dev->work_lock, flags); } } EXPORT_SYMBOL_GPL(vhost_work_queue); void vhost_poll_queue(struct vhost_poll *poll) { vhost_work_queue(poll->dev, &poll->work); } EXPORT_SYMBOL_GPL(vhost_poll_queue); static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { vq->num = 1; vq->desc = NULL; vq->avail = NULL; vq->used = NULL; vq->last_avail_idx = 0; vq->avail_idx = 0; vq->last_used_idx = 0; vq->signalled_used = 0; vq->signalled_used_valid = false; vq->used_flags = 0; vq->log_used = false; vq->log_addr = -1ull; vq->private_data = NULL; vq->acked_features = 0; vq->log_base = NULL; vq->error_ctx = NULL; vq->error = NULL; vq->kick = NULL; vq->call_ctx = NULL; vq->call = NULL; vq->log_ctx = NULL; vq->memory = NULL; vq->is_le = virtio_legacy_is_little_endian(); vhost_vq_reset_user_be(vq); } static int vhost_worker(void *data) { struct vhost_dev *dev = data; struct vhost_work *work = NULL; unsigned uninitialized_var(seq); mm_segment_t oldfs = get_fs(); set_fs(USER_DS); use_mm(dev->mm); for (;;) { /* mb paired w/ kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&dev->work_lock); if (work) { work->done_seq = seq; if (work->flushing) wake_up_all(&work->done); } if (kthread_should_stop()) { spin_unlock_irq(&dev->work_lock); __set_current_state(TASK_RUNNING); break; } if (!list_empty(&dev->work_list)) { work = list_first_entry(&dev->work_list, struct vhost_work, node); list_del_init(&work->node); seq = work->queue_seq; } else work = NULL; spin_unlock_irq(&dev->work_lock); if (work) { __set_current_state(TASK_RUNNING); work->fn(work); if (need_resched()) schedule(); } else schedule(); } unuse_mm(dev->mm); set_fs(oldfs); return 0; } static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) { kfree(vq->indirect); vq->indirect = NULL; kfree(vq->log); vq->log = NULL; kfree(vq->heads); vq->heads = NULL; } /* Helper to allocate iovec buffers for all vqs. 
*/ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) { struct vhost_virtqueue *vq; int i; for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV, GFP_KERNEL); vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL); vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL); if (!vq->indirect || !vq->log || !vq->heads) goto err_nomem; } return 0; err_nomem: for (; i >= 0; --i) vhost_vq_free_iovecs(dev->vqs[i]); return -ENOMEM; } static void vhost_dev_free_iovecs(struct vhost_dev *dev) { int i; for (i = 0; i < dev->nvqs; ++i) vhost_vq_free_iovecs(dev->vqs[i]); } void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs) { struct vhost_virtqueue *vq; int i; dev->vqs = vqs; dev->nvqs = nvqs; mutex_init(&dev->mutex); dev->log_ctx = NULL; dev->log_file = NULL; dev->memory = NULL; dev->mm = NULL; spin_lock_init(&dev->work_lock); INIT_LIST_HEAD(&dev->work_list); dev->worker = NULL; for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; vq->log = NULL; vq->indirect = NULL; vq->heads = NULL; vq->dev = dev; mutex_init(&vq->mutex); vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, POLLIN, dev); } } EXPORT_SYMBOL_GPL(vhost_dev_init); /* Caller should have device mutex */ long vhost_dev_check_owner(struct vhost_dev *dev) { /* Are you the owner? If not, I don't think you mean to do that */ return dev->mm == current->mm ? 0 : -EPERM; } EXPORT_SYMBOL_GPL(vhost_dev_check_owner); struct vhost_attach_cgroups_struct { struct vhost_work work; struct task_struct *owner; int ret; }; static void vhost_attach_cgroups_work(struct vhost_work *work) { struct vhost_attach_cgroups_struct *s; s = container_of(work, struct vhost_attach_cgroups_struct, work); s->ret = cgroup_attach_task_all(s->owner, current); } static int vhost_attach_cgroups(struct vhost_dev *dev) { struct vhost_attach_cgroups_struct attach; attach.owner = current; vhost_work_init(&attach.work, vhost_attach_cgroups_work); vhost_work_queue(dev, &attach.work); vhost_work_flush(dev, &attach.work); return attach.ret; } /* Caller should have device mutex */ bool vhost_dev_has_owner(struct vhost_dev *dev) { return dev->mm; } EXPORT_SYMBOL_GPL(vhost_dev_has_owner); /* Caller should have device mutex */ long vhost_dev_set_owner(struct vhost_dev *dev) { struct task_struct *worker; int err; /* Is there an owner already? */ if (vhost_dev_has_owner(dev)) { err = -EBUSY; goto err_mm; } /* No owner, become one */ dev->mm = get_task_mm(current); worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); if (IS_ERR(worker)) { err = PTR_ERR(worker); goto err_worker; } dev->worker = worker; wake_up_process(worker); /* avoid contributing to loadavg */ err = vhost_attach_cgroups(dev); if (err) goto err_cgroup; err = vhost_dev_alloc_iovecs(dev); if (err) goto err_cgroup; return 0; err_cgroup: kthread_stop(worker); dev->worker = NULL; err_worker: if (dev->mm) mmput(dev->mm); dev->mm = NULL; err_mm: return err; } EXPORT_SYMBOL_GPL(vhost_dev_set_owner); struct vhost_memory *vhost_dev_reset_owner_prepare(void) { return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL); } EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); /* Caller should have device mutex */ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory) { int i; vhost_dev_cleanup(dev, true); /* Restore memory to default empty mapping. 
*/ memory->nregions = 0; dev->memory = memory; /* We don't need VQ locks below since vhost_dev_cleanup makes sure * VQs aren't running. */ for (i = 0; i < dev->nvqs; ++i) dev->vqs[i]->memory = memory; } EXPORT_SYMBOL_GPL(vhost_dev_reset_owner); void vhost_dev_stop(struct vhost_dev *dev) { int i; for (i = 0; i < dev->nvqs; ++i) { if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) { vhost_poll_stop(&dev->vqs[i]->poll); vhost_poll_flush(&dev->vqs[i]->poll); } } } EXPORT_SYMBOL_GPL(vhost_dev_stop); /* Caller should have device mutex if and only if locked is set */ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) { int i; for (i = 0; i < dev->nvqs; ++i) { if (dev->vqs[i]->error_ctx) eventfd_ctx_put(dev->vqs[i]->error_ctx); if (dev->vqs[i]->error) fput(dev->vqs[i]->error); if (dev->vqs[i]->kick) fput(dev->vqs[i]->kick); if (dev->vqs[i]->call_ctx) eventfd_ctx_put(dev->vqs[i]->call_ctx); if (dev->vqs[i]->call) fput(dev->vqs[i]->call); vhost_vq_reset(dev, dev->vqs[i]); } vhost_dev_free_iovecs(dev); if (dev->log_ctx) eventfd_ctx_put(dev->log_ctx); dev->log_ctx = NULL; if (dev->log_file) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ kvfree(dev->memory); dev->memory = NULL; WARN_ON(!list_empty(&dev->work_list)); if (dev->worker) { kthread_stop(dev->worker); dev->worker = NULL; } if (dev->mm) mmput(dev->mm); dev->mm = NULL; } EXPORT_SYMBOL_GPL(vhost_dev_cleanup); static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) { u64 a = addr / VHOST_PAGE_SIZE / 8; /* Make sure 64 bit math will not overflow. */ if (a > ULONG_MAX - (unsigned long)log_base || a + (unsigned long)log_base > ULONG_MAX) return 0; return access_ok(VERIFY_WRITE, log_base + a, (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); } /* Caller should have vq mutex and device mutex. */ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, int log_all) { int i; if (!mem) return 0; for (i = 0; i < mem->nregions; ++i) { struct vhost_memory_region *m = mem->regions + i; unsigned long a = m->userspace_addr; if (m->memory_size > ULONG_MAX) return 0; else if (!access_ok(VERIFY_WRITE, (void __user *)a, m->memory_size)) return 0; else if (log_all && !log_access_ok(log_base, m->guest_phys_addr, m->memory_size)) return 0; } return 1; } /* Can we switch to this memory table? */ /* Caller should have device mutex but not vq mutex */ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem, int log_all) { int i; for (i = 0; i < d->nvqs; ++i) { int ok; bool log; mutex_lock(&d->vqs[i]->mutex); log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); /* If ring is inactive, will check when it's enabled. */ if (d->vqs[i]->private_data) ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log); else ok = 1; mutex_unlock(&d->vqs[i]->mutex); if (!ok) return 0; } return 1; } static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, struct vring_desc __user *desc, struct vring_avail __user *avail, struct vring_used __user *used) { size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return access_ok(VERIFY_READ, desc, num * sizeof *desc) && access_ok(VERIFY_READ, avail, sizeof *avail + num * sizeof *avail->ring + s) && access_ok(VERIFY_WRITE, used, sizeof *used + num * sizeof *used->ring + s); } /* Can we log writes? 
*/ /* Caller should have device mutex but not vq mutex */ int vhost_log_access_ok(struct vhost_dev *dev) { return memory_access_ok(dev, dev->memory, 1); } EXPORT_SYMBOL_GPL(vhost_log_access_ok); /* Verify access for write logging. */ /* Caller should have vq mutex and device mutex */ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; return vq_memory_access_ok(log_base, vq->memory, vhost_has_feature(vq, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->log_addr, sizeof *vq->used + vq->num * sizeof *vq->used->ring + s)); } /* Can we start vq? */ /* Caller should have vq mutex and device mutex */ int vhost_vq_access_ok(struct vhost_virtqueue *vq) { return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) && vq_log_access_ok(vq, vq->log_base); } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) { const struct vhost_memory_region *r1 = p1, *r2 = p2; if (r1->guest_phys_addr < r2->guest_phys_addr) return 1; if (r1->guest_phys_addr > r2->guest_phys_addr) return -1; return 0; } static void *vhost_kvzalloc(unsigned long size) { void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); if (!n) { n = vzalloc(size); if (!n) return ERR_PTR(-ENOMEM); } return n; } static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; unsigned long size = offsetof(struct vhost_memory, regions); int i; if (copy_from_user(&mem, m, size)) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; if (mem.nregions > max_mem_regions) return -E2BIG; newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, mem.nregions * sizeof *m->regions)) { kvfree(newmem); return -EFAULT; } sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), vhost_memory_reg_sort_cmp, NULL); if (!memory_access_ok(d, newmem, 0)) { kvfree(newmem); return -EFAULT; } oldmem = d->memory; d->memory = newmem; /* All memory accesses are done under some VQ mutex. */ for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i]->mutex); d->vqs[i]->memory = newmem; mutex_unlock(&d->vqs[i]->mutex); } kvfree(oldmem); return 0; } long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL; bool pollstart = false, pollstop = false; struct eventfd_ctx *ctx = NULL; u32 __user *idxp = argp; struct vhost_virtqueue *vq; struct vhost_vring_state s; struct vhost_vring_file f; struct vhost_vring_addr a; u32 idx; long r; r = get_user(idx, idxp); if (r < 0) return r; if (idx >= d->nvqs) return -ENOBUFS; vq = d->vqs[idx]; mutex_lock(&vq->mutex); switch (ioctl) { case VHOST_SET_VRING_NUM: /* Resizing ring with an active backend? * You don't want to do that. */ if (vq->private_data) { r = -EBUSY; break; } if (copy_from_user(&s, argp, sizeof s)) { r = -EFAULT; break; } if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { r = -EINVAL; break; } vq->num = s.num; break; case VHOST_SET_VRING_BASE: /* Moving base with an active backend? * You don't want to do that. */ if (vq->private_data) { r = -EBUSY; break; } if (copy_from_user(&s, argp, sizeof s)) { r = -EFAULT; break; } if (s.num > 0xffff) { r = -EINVAL; break; } vq->last_avail_idx = s.num; /* Forget the cached index value. 
*/ vq->avail_idx = vq->last_avail_idx; break; case VHOST_GET_VRING_BASE: s.index = idx; s.num = vq->last_avail_idx; if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; case VHOST_SET_VRING_ADDR: if (copy_from_user(&a, argp, sizeof a)) { r = -EFAULT; break; } if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { r = -EOPNOTSUPP; break; } /* For 32bit, verify that the top 32bits of the user data are set to zero. */ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || (u64)(unsigned long)a.used_user_addr != a.used_user_addr || (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) { r = -EFAULT; break; } /* Make sure it's safe to cast pointers to vring types. */ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || (a.log_guest_addr & (sizeof(u64) - 1))) { r = -EINVAL; break; } /* We only verify access here if backend is configured. * If it is not, we don't as size might not have been setup. * We will verify when backend is configured. */ if (vq->private_data) { if (!vq_access_ok(vq, vq->num, (void __user *)(unsigned long)a.desc_user_addr, (void __user *)(unsigned long)a.avail_user_addr, (void __user *)(unsigned long)a.used_user_addr)) { r = -EINVAL; break; } /* Also validate log access for used ring if enabled. */ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && !log_access_ok(vq->log_base, a.log_guest_addr, sizeof *vq->used + vq->num * sizeof *vq->used->ring)) { r = -EINVAL; break; } } vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); vq->desc = (void __user *)(unsigned long)a.desc_user_addr; vq->avail = (void __user *)(unsigned long)a.avail_user_addr; vq->log_addr = a.log_guest_addr; vq->used = (void __user *)(unsigned long)a.used_user_addr; break; case VHOST_SET_VRING_KICK: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->kick) { pollstop = (filep = vq->kick) != NULL; pollstart = (vq->kick = eventfp) != NULL; } else filep = eventfp; break; case VHOST_SET_VRING_CALL: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->call) { filep = vq->call; ctx = vq->call_ctx; vq->call = eventfp; vq->call_ctx = eventfp ? eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; break; case VHOST_SET_VRING_ERR: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->error) { filep = vq->error; vq->error = eventfp; ctx = vq->error_ctx; vq->error_ctx = eventfp ? 
eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; break; case VHOST_SET_VRING_ENDIAN: r = vhost_set_vring_endian(vq, argp); break; case VHOST_GET_VRING_ENDIAN: r = vhost_get_vring_endian(vq, idx, argp); break; default: r = -ENOIOCTLCMD; } if (pollstop && vq->handle_kick) vhost_poll_stop(&vq->poll); if (ctx) eventfd_ctx_put(ctx); if (filep) fput(filep); if (pollstart && vq->handle_kick) r = vhost_poll_start(&vq->poll, vq->kick); mutex_unlock(&vq->mutex); if (pollstop && vq->handle_kick) vhost_poll_flush(&vq->poll); return r; } EXPORT_SYMBOL_GPL(vhost_vring_ioctl); /* Caller must have device mutex */ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL; struct eventfd_ctx *ctx = NULL; u64 p; long r; int i, fd; /* If you are not the owner, you can become one */ if (ioctl == VHOST_SET_OWNER) { r = vhost_dev_set_owner(d); goto done; } /* You must be the owner to do anything else */ r = vhost_dev_check_owner(d); if (r) goto done; switch (ioctl) { case VHOST_SET_MEM_TABLE: r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: if (copy_from_user(&p, argp, sizeof p)) { r = -EFAULT; break; } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; } for (i = 0; i < d->nvqs; ++i) { struct vhost_virtqueue *vq; void __user *base = (void __user *)(unsigned long)p; vq = d->vqs[i]; mutex_lock(&vq->mutex); /* If ring is inactive, will check when it's enabled. */ if (vq->private_data && !vq_log_access_ok(vq, base)) r = -EFAULT; else vq->log_base = base; mutex_unlock(&vq->mutex); } break; case VHOST_SET_LOG_FD: r = get_user(fd, (int __user *)argp); if (r < 0) break; eventfp = fd == -1 ? NULL : eventfd_fget(fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != d->log_file) { filep = d->log_file; ctx = d->log_ctx; d->log_ctx = eventfp ? eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i]->mutex); d->vqs[i]->log_ctx = d->log_ctx; mutex_unlock(&d->vqs[i]->mutex); } if (ctx) eventfd_ctx_put(ctx); if (filep) fput(filep); break; default: r = -ENOIOCTLCMD; break; } done: return r; } EXPORT_SYMBOL_GPL(vhost_dev_ioctl); static const struct vhost_memory_region *find_region(struct vhost_memory *mem, __u64 addr, __u32 len) { const struct vhost_memory_region *reg; int start = 0, end = mem->nregions; while (start < end) { int slot = start + (end - start) / 2; reg = mem->regions + slot; if (addr >= reg->guest_phys_addr) end = slot; else start = slot + 1; } reg = mem->regions + start; if (addr >= reg->guest_phys_addr && reg->guest_phys_addr + reg->memory_size > addr) return reg; return NULL; } /* TODO: This is really inefficient. We need something like get_user() * (instruction directly accesses the data, with an exception table entry * returning -EFAULT). See Documentation/x86/exception-tables.txt. 
*/ static int set_bit_to_user(int nr, void __user *addr) { unsigned long log = (unsigned long)addr; struct page *page; void *base; int bit = nr + (log % PAGE_SIZE) * 8; int r; r = get_user_pages_fast(log, 1, 1, &page); if (r < 0) return r; BUG_ON(r != 1); base = kmap_atomic(page); set_bit(bit, base); kunmap_atomic(base); set_page_dirty_lock(page); put_page(page); return 0; } static int log_write(void __user *log_base, u64 write_address, u64 write_length) { u64 write_page = write_address / VHOST_PAGE_SIZE; int r; if (!write_length) return 0; write_length += write_address % VHOST_PAGE_SIZE; for (;;) { u64 base = (u64)(unsigned long)log_base; u64 log = base + write_page / 8; int bit = write_page % 8; if ((u64)(unsigned long)log != log) return -EFAULT; r = set_bit_to_user(bit, (void __user *)(unsigned long)log); if (r < 0) return r; if (write_length <= VHOST_PAGE_SIZE) break; write_length -= VHOST_PAGE_SIZE; write_page += 1; } return r; } int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len) { int i, r; /* Make sure data written is seen before log. */ smp_wmb(); for (i = 0; i < log_num; ++i) { u64 l = min(log[i].len, len); r = log_write(vq->log_base, log[i].addr, l); if (r < 0) return r; len -= l; if (!len) { if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); return 0; } } /* Length written exceeds what we have stored. This is a bug. */ BUG(); return 0; } EXPORT_SYMBOL_GPL(vhost_log_write); static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. */ smp_wmb(); /* Log used flag write. */ used = &vq->used->flags; log_write(vq->log_base, vq->log_addr + (used - (void __user *)vq->used), sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return 0; } static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) { if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq))) return -EFAULT; if (unlikely(vq->log_used)) { void __user *used; /* Make sure the event is seen before log. 
*/ smp_wmb(); /* Log avail event write */ used = vhost_avail_event(vq); log_write(vq->log_base, vq->log_addr + (used - (void __user *)vq->used), sizeof *vhost_avail_event(vq)); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return 0; } int vhost_init_used(struct vhost_virtqueue *vq) { __virtio16 last_used_idx; int r; if (!vq->private_data) { vq->is_le = virtio_legacy_is_little_endian(); return 0; } vhost_init_is_le(vq); r = vhost_update_used_flags(vq); if (r) return r; vq->signalled_used_valid = false; if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) return -EFAULT; r = __get_user(last_used_idx, &vq->used->idx); if (r) return r; vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); return 0; } EXPORT_SYMBOL_GPL(vhost_init_used); static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct iovec iov[], int iov_size) { const struct vhost_memory_region *reg; struct vhost_memory *mem; struct iovec *_iov; u64 s = 0; int ret = 0; mem = vq->memory; while ((u64)len > s) { u64 size; if (unlikely(ret >= iov_size)) { ret = -ENOBUFS; break; } reg = find_region(mem, addr, len); if (unlikely(!reg)) { ret = -EFAULT; break; } _iov = iov + ret; size = reg->memory_size - addr + reg->guest_phys_addr; _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) (reg->userspace_addr + addr - reg->guest_phys_addr); s += size; addr += size; ++ret; } return ret; } /* Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, * or -1U if we're at the end. */ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) return -1U; /* Check they're not leading us off end of descriptors. */ next = vhost16_to_cpu(vq, desc->next); /* Make sure compiler knows to grab that: we don't want it changing! */ /* We will use the result as an index in an array, so most * architectures only need a compiler barrier here. */ read_barrier_depends(); return next; } static int get_indirect(struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num, struct vring_desc *indirect) { struct vring_desc desc; unsigned int i = 0, count, found = 0; u32 len = vhost32_to_cpu(vq, indirect->len); struct iov_iter from; int ret; /* Sanity check */ if (unlikely(len % sizeof desc)) { vq_err(vq, "Invalid length in indirect descriptor: " "len 0x%llx not multiple of 0x%zx\n", (unsigned long long)len, sizeof desc); return -EINVAL; } ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, UIO_MAXIOV); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d in indirect.\n", ret); return ret; } iov_iter_init(&from, READ, vq->indirect, ret, len); /* We will use the result as an address to read from, so most * architectures only need a compiler barrier here. */ read_barrier_depends(); count = len / sizeof desc; /* Buffers are chained via a 16 bit next field, so * we can have at most 2^16 of these. 
*/ if (unlikely(count > USHRT_MAX + 1)) { vq_err(vq, "Indirect buffer length too big: %d\n", indirect->len); return -E2BIG; } do { unsigned iov_count = *in_num + *out_num; if (unlikely(++found > count)) { vq_err(vq, "Loop detected: last one at %u " "indirect size %u\n", i, count); return -EINVAL; } if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) != sizeof(desc))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d indirect idx %d\n", ret, i); return ret; } /* If this is an input descriptor, increment that count. */ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { *in_num += ret; if (unlikely(log)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Indirect descriptor " "has out after in: idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(vq, &desc)) != -1); return 0; } /* This looks in the virtqueue and for the first available buffer, and converts * it to an iovec for convenient access. Since descriptors consist of some * number of output then some number of input descriptors, it's actually two * iovecs, but we pack them into one and note how many of each there were. * * This function returns the descriptor number found, or vq->num (which is * never a valid descriptor number) if none was found. A negative code is * returned on error. */ int vhost_get_vq_desc(struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num) { struct vring_desc desc; unsigned int i, head, found = 0; u16 last_avail_idx; __virtio16 avail_idx; __virtio16 ring_head; int ret; /* Check it isn't doing very strange things with descriptor numbers. */ last_avail_idx = vq->last_avail_idx; if (unlikely(__get_user(avail_idx, &vq->avail->idx))) { vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx); return -EFAULT; } vq->avail_idx = vhost16_to_cpu(vq, avail_idx); if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { vq_err(vq, "Guest moved used index from %u to %u", last_avail_idx, vq->avail_idx); return -EFAULT; } /* If there's nothing new since last we looked, return invalid. */ if (vq->avail_idx == last_avail_idx) return vq->num; /* Only get avail ring entries after they have been exposed by guest. */ smp_rmb(); /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ if (unlikely(__get_user(ring_head, &vq->avail->ring[last_avail_idx % vq->num]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, &vq->avail->ring[last_avail_idx % vq->num]); return -EFAULT; } head = vhost16_to_cpu(vq, ring_head); /* If their number is silly, that's an error. 
*/ if (unlikely(head >= vq->num)) { vq_err(vq, "Guest says index %u > %u is available", head, vq->num); return -EINVAL; } /* When we start there are none of either input nor output. */ *out_num = *in_num = 0; if (unlikely(log)) *log_num = 0; i = head; do { unsigned iov_count = *in_num + *out_num; if (unlikely(i >= vq->num)) { vq_err(vq, "Desc index is %u > %u, head = %u", i, vq->num, head); return -EINVAL; } if (unlikely(++found > vq->num)) { vq_err(vq, "Loop detected: last one at %u " "vq size %u head %u\n", i, vq->num, head); return -EINVAL; } ret = __copy_from_user(&desc, vq->desc + i, sizeof desc); if (unlikely(ret)) { vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", i, vq->desc + i); return -EFAULT; } if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { ret = get_indirect(vq, iov, iov_size, out_num, in_num, log, log_num, &desc); if (unlikely(ret < 0)) { vq_err(vq, "Failure detected " "in indirect descriptor at idx %d\n", i); return ret; } continue; } ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d descriptor idx %d\n", ret, i); return ret; } if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { /* If this is an input descriptor, * increment that count. */ *in_num += ret; if (unlikely(log)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Descriptor has out after in: " "idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(vq, &desc)) != -1); /* On success, increment avail index. */ vq->last_avail_idx++; /* Assume notifications from guest are disabled at this point, * if they aren't we would need to update avail_event index. */ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); return head; } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) { vq->last_avail_idx -= n; } EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. */ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) { struct vring_used_elem heads = { cpu_to_vhost32(vq, head), cpu_to_vhost32(vq, len) }; return vhost_add_used_n(vq, &heads, 1); } EXPORT_SYMBOL_GPL(vhost_add_used); static int __vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { struct vring_used_elem __user *used; u16 old, new; int start; start = vq->last_used_idx % vq->num; used = vq->used->ring + start; if (count == 1) { if (__put_user(heads[0].id, &used->id)) { vq_err(vq, "Failed to write used id"); return -EFAULT; } if (__put_user(heads[0].len, &used->len)) { vq_err(vq, "Failed to write used len"); return -EFAULT; } } else if (__copy_to_user(used, heads, count * sizeof *used)) { vq_err(vq, "Failed to write used"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. 
*/ log_write(vq->log_base, vq->log_addr + ((void __user *)used - (void __user *)vq->used), count * sizeof *used); } old = vq->last_used_idx; new = (vq->last_used_idx += count); /* If the driver never bothers to signal in a very long while, * used index might wrap around. If that happens, invalidate * signalled_used index we stored. TODO: make sure driver * signals at least once in 2^16 and remove this. */ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) vq->signalled_used_valid = false; return 0; } /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. */ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { int start, n, r; start = vq->last_used_idx % vq->num; n = vq->num - start; if (n < count) { r = __vhost_add_used_n(vq, heads, n); if (r < 0) return r; heads += n; count -= n; } r = __vhost_add_used_n(vq, heads, count); /* Make sure buffer is written before we update index. */ smp_wmb(); if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Log used index update. */ log_write(vq->log_base, vq->log_addr + offsetof(struct vring_used, idx), sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } return r; } EXPORT_SYMBOL_GPL(vhost_add_used_n); static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { __u16 old, new; __virtio16 event; bool v; /* Flush out used index updates. This is paired * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && unlikely(vq->avail_idx == vq->last_avail_idx)) return true; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { __virtio16 flags; if (__get_user(flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); } old = vq->signalled_used; v = vq->signalled_used_valid; new = vq->signalled_used = vq->last_used_idx; vq->signalled_used_valid = true; if (unlikely(!v)) return true; if (__get_user(event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } return vring_need_event(vhost16_to_cpu(vq, event), new, old); } /* This actually signals the guest, using eventfd. */ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { /* Signal the Guest tell them we used something up. */ if (vq->call_ctx && vhost_notify(dev, vq)) eventfd_signal(vq->call_ctx, 1); } EXPORT_SYMBOL_GPL(vhost_signal); /* And here's the combo meal deal. Supersize me! */ void vhost_add_used_and_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq, unsigned int head, int len) { vhost_add_used(vq, head, len); vhost_signal(dev, vq); } EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); /* multi-buffer version of vhost_add_used_and_signal */ void vhost_add_used_and_signal_n(struct vhost_dev *dev, struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { vhost_add_used_n(vq, heads, count); vhost_signal(dev, vq); } EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); /* OK, now we need to know about added descriptors. 
*/ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { __virtio16 avail_idx; int r; if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) return false; vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { r = vhost_update_used_flags(vq); if (r) { vq_err(vq, "Failed to enable notification at %p: %d\n", &vq->used->flags, r); return false; } } else { r = vhost_update_avail_event(vq, vq->avail_idx); if (r) { vq_err(vq, "Failed to update avail event index at %p: %d\n", vhost_avail_event(vq), r); return false; } } /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); r = __get_user(avail_idx, &vq->avail->idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); return false; } return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; } EXPORT_SYMBOL_GPL(vhost_enable_notify); /* We don't need to be notified again. */ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; if (vq->used_flags & VRING_USED_F_NO_NOTIFY) return; vq->used_flags |= VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { r = vhost_update_used_flags(vq); if (r) vq_err(vq, "Failed to disable notification at %p: %d\n", &vq->used->flags, r); } } EXPORT_SYMBOL_GPL(vhost_disable_notify); static int __init vhost_init(void) { return 0; } static void __exit vhost_exit(void) { } module_init(vhost_init); module_exit(vhost_exit); MODULE_VERSION("0.0.1"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael S. Tsirkin"); MODULE_DESCRIPTION("Host kernel accelerator for virtio");
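/*
 * A minimal standalone sketch (not part of the vhost.c source above) of the
 * used_event check that vhost_notify() relies on. It mirrors the arithmetic
 * of vring_need_event() from include/uapi/linux/virtio_ring.h and can be
 * compiled on its own to experiment with the 16-bit wrap-around behaviour.
 * The index values in main() are arbitrary example numbers.
 */
#include <stdint.h>
#include <stdio.h>

/* True when event_idx lies in the window [old, new_idx), computed modulo
 * 2^16 so that index wrap-around is handled correctly. */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* Guest asked to be signalled once used entry 5 has been consumed. */
	uint16_t event_idx = 5;

	/* Moving last_used_idx from 3 to 8 crosses index 5: signal needed. */
	printf("signal: %d\n", need_event(event_idx, 8, 3));	/* prints 1 */

	/* Moving from 8 to 12 does not cross the event index again. */
	printf("signal: %d\n", need_event(event_idx, 12, 8));	/* prints 0 */
	return 0;
}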
./CrossVul/dataset_final_sorted/CWE-399/c/bad_1713_0
crossvul-cpp_data_good_4984_1
/* * linux/fs/pipe.c * * Copyright (C) 1991, 1992, 1999 Linus Torvalds */ #include <linux/mm.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/log2.h> #include <linux/mount.h> #include <linux/magic.h> #include <linux/pipe_fs_i.h> #include <linux/uio.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/audit.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include "internal.h" /* * The max size that a non-root user is allowed to grow the pipe. Can * be set by root in /proc/sys/fs/pipe-max-size */ unsigned int pipe_max_size = 1048576; /* * Minimum pipe size, as required by POSIX */ unsigned int pipe_min_size = PAGE_SIZE; /* Maximum allocatable pages per user. Hard limit is unset by default, soft * matches default values. */ unsigned long pipe_user_pages_hard; unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR; /* * We use a start+len construction, which provides full use of the * allocated memory. * -- Florian Coosmann (FGC) * * Reads with count = 0 should always return 0. * -- Julian Bradfield 1999-06-07. * * FIFOs and Pipes now generate SIGIO for both readers and writers. * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16 * * pipe_read & write cleanup * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 */ static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass) { if (pipe->files) mutex_lock_nested(&pipe->mutex, subclass); } void pipe_lock(struct pipe_inode_info *pipe) { /* * pipe_lock() nests non-pipe inode locks (for writing to a file) */ pipe_lock_nested(pipe, I_MUTEX_PARENT); } EXPORT_SYMBOL(pipe_lock); void pipe_unlock(struct pipe_inode_info *pipe) { if (pipe->files) mutex_unlock(&pipe->mutex); } EXPORT_SYMBOL(pipe_unlock); static inline void __pipe_lock(struct pipe_inode_info *pipe) { mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT); } static inline void __pipe_unlock(struct pipe_inode_info *pipe) { mutex_unlock(&pipe->mutex); } void pipe_double_lock(struct pipe_inode_info *pipe1, struct pipe_inode_info *pipe2) { BUG_ON(pipe1 == pipe2); if (pipe1 < pipe2) { pipe_lock_nested(pipe1, I_MUTEX_PARENT); pipe_lock_nested(pipe2, I_MUTEX_CHILD); } else { pipe_lock_nested(pipe2, I_MUTEX_PARENT); pipe_lock_nested(pipe1, I_MUTEX_CHILD); } } /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe) { DEFINE_WAIT(wait); /* * Pipes are system-local resources, so sleeping on them * is considered a noninteractive wait: */ prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); pipe_unlock(pipe); schedule(); finish_wait(&pipe->wait, &wait); pipe_lock(pipe); } static void anon_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * If nobody else uses this page, and we don't already have a * temporary page, let's keep track of it as a one-deep * allocation cache. (Otherwise just release our reference to it) */ if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else page_cache_release(page); } /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal * * Description: * This function attempts to steal the &struct page attached to * @buf. If successful, this function returns 0 and returns with * the page locked. 
The caller may then reuse the page for whatever * he wishes; the typical use is insertion into a different file * page cache. */ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * A reference of one is golden, that means that the owner of this * page is the only one holding a reference to it. lock the page * and return OK. */ if (page_count(page) == 1) { lock_page(page); return 0; } return 1; } EXPORT_SYMBOL(generic_pipe_buf_steal); /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to * * Description: * This function grabs an extra reference to @buf. It's used in * in the tee() system call, when we duplicate the buffers in one * pipe into another. */ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_get(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_get); /** * generic_pipe_buf_confirm - verify contents of the pipe buffer * @info: the pipe that the buffer belongs to * @buf: the buffer to confirm * * Description: * This function does nothing, because the generic pipe code uses * pages that are always good when inserted into the pipe. */ int generic_pipe_buf_confirm(struct pipe_inode_info *info, struct pipe_buffer *buf) { return 0; } EXPORT_SYMBOL(generic_pipe_buf_confirm); /** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to * * Description: * This function releases a reference to @buf. */ void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_release(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_release); static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static const struct pipe_buf_operations packet_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static ssize_t pipe_read(struct kiocb *iocb, struct iov_iter *to) { size_t total_len = iov_iter_count(to); struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; int do_wakeup; ssize_t ret; /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; __pipe_lock(pipe); for (;;) { int bufs = pipe->nrbufs; if (bufs) { int curbuf = pipe->curbuf; struct pipe_buffer *buf = pipe->bufs + curbuf; const struct pipe_buf_operations *ops = buf->ops; size_t chars = buf->len; size_t written; int error; if (chars > total_len) chars = total_len; error = ops->confirm(pipe, buf); if (error) { if (!ret) ret = error; break; } written = copy_page_to_iter(buf->page, buf->offset, chars, to); if (unlikely(written < chars)) { if (!ret) ret = -EFAULT; break; } ret += chars; buf->offset += chars; buf->len -= chars; /* Was it a packet buffer? Clean up and exit */ if (buf->flags & PIPE_BUF_FLAG_PACKET) { total_len = chars; buf->len = 0; } if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); curbuf = (curbuf + 1) & (pipe->buffers - 1); pipe->curbuf = curbuf; pipe->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? 
*/ continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); } __pipe_unlock(pipe); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; } static inline int is_packetized(struct file *file) { return (file->f_flags & O_DIRECT) != 0; } static ssize_t pipe_write(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; ssize_t ret = 0; int do_wakeup = 0; size_t total_len = iov_iter_count(from); ssize_t chars; /* Null write succeeds. */ if (unlikely(total_len == 0)) return 0; __pipe_lock(pipe); if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } /* We try to merge small writes */ chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ if (pipe->nrbufs && chars != 0) { int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + lastbuf; const struct pipe_buf_operations *ops = buf->ops; int offset = buf->offset + buf->len; if (ops->can_merge && offset + chars <= PAGE_SIZE) { ret = ops->confirm(pipe, buf); if (ret) goto out; ret = copy_page_from_iter(buf->page, offset, chars, from); if (unlikely(ret < chars)) { ret = -EFAULT; goto out; } do_wakeup = 1; buf->len += ret; if (!iov_iter_count(from)) goto out; } } for (;;) { int bufs; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } bufs = pipe->nrbufs; if (bufs < pipe->buffers) { int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1); struct pipe_buffer *buf = pipe->bufs + newbuf; struct page *page = pipe->tmp_page; int copied; if (!page) { page = alloc_page(GFP_HIGHUSER); if (unlikely(!page)) { ret = ret ? : -ENOMEM; break; } pipe->tmp_page = page; } /* Always wake up, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. * FIXME! Is this really true? 
*/ do_wakeup = 1; copied = copy_page_from_iter(page, 0, PAGE_SIZE, from); if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) { if (!ret) ret = -EFAULT; break; } ret += copied; /* Insert it into the buffer array */ buf->page = page; buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = copied; buf->flags = 0; if (is_packetized(filp)) { buf->ops = &packet_pipe_buf_ops; buf->flags = PIPE_BUF_FLAG_PACKET; } pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; if (!iov_iter_count(from)) break; } if (bufs < pipe->buffers) continue; if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } out: __pipe_unlock(pipe); if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { int err = file_update_time(filp); if (err) ret = err; sb_end_write(file_inode(filp)->i_sb); } return ret; } static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe = filp->private_data; int count, buf, nrbufs; switch (cmd) { case FIONREAD: __pipe_lock(pipe); count = 0; buf = pipe->curbuf; nrbufs = pipe->nrbufs; while (--nrbufs >= 0) { count += pipe->bufs[buf].len; buf = (buf+1) & (pipe->buffers - 1); } __pipe_unlock(pipe); return put_user(count, (int __user *)arg); default: return -ENOIOCTLCMD; } } /* No kernel lock held - fine */ static unsigned int pipe_poll(struct file *filp, poll_table *wait) { unsigned int mask; struct pipe_inode_info *pipe = filp->private_data; int nrbufs; poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. */ nrbufs = pipe->nrbufs; mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) mask |= POLLHUP; } if (filp->f_mode & FMODE_WRITE) { mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0; /* * Most Unices do not set POLLERR for FIFOs but on Linux they * behave exactly like pipes for poll(). 
*/ if (!pipe->readers) mask |= POLLERR; } return mask; } static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe) { int kill = 0; spin_lock(&inode->i_lock); if (!--pipe->files) { inode->i_pipe = NULL; kill = 1; } spin_unlock(&inode->i_lock); if (kill) free_pipe_info(pipe); } static int pipe_release(struct inode *inode, struct file *file) { struct pipe_inode_info *pipe = file->private_data; __pipe_lock(pipe); if (file->f_mode & FMODE_READ) pipe->readers--; if (file->f_mode & FMODE_WRITE) pipe->writers--; if (pipe->readers || pipe->writers) { wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } __pipe_unlock(pipe); put_pipe_info(inode, pipe); return 0; } static int pipe_fasync(int fd, struct file *filp, int on) { struct pipe_inode_info *pipe = filp->private_data; int retval = 0; __pipe_lock(pipe); if (filp->f_mode & FMODE_READ) retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); if ((filp->f_mode & FMODE_WRITE) && retval >= 0) { retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); if (retval < 0 && (filp->f_mode & FMODE_READ)) /* this can happen only if on == T */ fasync_helper(-1, filp, 0, &pipe->fasync_readers); } __pipe_unlock(pipe); return retval; } static void account_pipe_buffers(struct pipe_inode_info *pipe, unsigned long old, unsigned long new) { atomic_long_add(new - old, &pipe->user->pipe_bufs); } static bool too_many_pipe_buffers_soft(struct user_struct *user) { return pipe_user_pages_soft && atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft; } static bool too_many_pipe_buffers_hard(struct user_struct *user) { return pipe_user_pages_hard && atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard; } struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); if (pipe) { unsigned long pipe_bufs = PIPE_DEF_BUFFERS; struct user_struct *user = get_current_user(); if (!too_many_pipe_buffers_hard(user)) { if (too_many_pipe_buffers_soft(user)) pipe_bufs = 1; pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); } if (pipe->bufs) { init_waitqueue_head(&pipe->wait); pipe->r_counter = pipe->w_counter = 1; pipe->buffers = pipe_bufs; pipe->user = user; account_pipe_buffers(pipe, 0, pipe_bufs); mutex_init(&pipe->mutex); return pipe; } free_uid(user); kfree(pipe); } return NULL; } void free_pipe_info(struct pipe_inode_info *pipe) { int i; account_pipe_buffers(pipe, pipe->buffers, 0); free_uid(pipe->user); for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) buf->ops->release(pipe, buf); } if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe->bufs); kfree(pipe); } static struct vfsmount *pipe_mnt __read_mostly; /* * pipefs_dname() is called from d_path(). 
*/ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", d_inode(dentry)->i_ino); } static const struct dentry_operations pipefs_dentry_operations = { .d_dname = pipefs_dname, }; static struct inode * get_pipe_inode(void) { struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb); struct pipe_inode_info *pipe; if (!inode) goto fail_inode; inode->i_ino = get_next_ino(); pipe = alloc_pipe_info(); if (!pipe) goto fail_iput; inode->i_pipe = pipe; pipe->files = 2; pipe->readers = pipe->writers = 1; inode->i_fop = &pipefifo_fops; /* * Mark the inode dirty from the very beginning, * that way it will never be moved to the dirty * list because "mark_inode_dirty()" will think * that it already _is_ on the dirty list. */ inode->i_state = I_DIRTY; inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; return inode; fail_iput: iput(inode); fail_inode: return NULL; } int create_pipe_files(struct file **res, int flags) { int err; struct inode *inode = get_pipe_inode(); struct file *f; struct path path; static struct qstr name = { .name = "" }; if (!inode) return -ENFILE; err = -ENOMEM; path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name); if (!path.dentry) goto err_inode; path.mnt = mntget(pipe_mnt); d_instantiate(path.dentry, inode); f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops); if (IS_ERR(f)) { err = PTR_ERR(f); goto err_dentry; } f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)); f->private_data = inode->i_pipe; res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops); if (IS_ERR(res[0])) { err = PTR_ERR(res[0]); goto err_file; } path_get(&path); res[0]->private_data = inode->i_pipe; res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK); res[1] = f; return 0; err_file: put_filp(f); err_dentry: free_pipe_info(inode->i_pipe); path_put(&path); return err; err_inode: free_pipe_info(inode->i_pipe); iput(inode); return err; } static int __do_pipe_flags(int *fd, struct file **files, int flags) { int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; error = create_pipe_files(files, flags); if (error) return error; error = get_unused_fd_flags(flags); if (error < 0) goto err_read_pipe; fdr = error; error = get_unused_fd_flags(flags); if (error < 0) goto err_fdr; fdw = error; audit_fd_pair(fdr, fdw); fd[0] = fdr; fd[1] = fdw; return 0; err_fdr: put_unused_fd(fdr); err_read_pipe: fput(files[0]); fput(files[1]); return error; } int do_pipe_flags(int *fd, int flags) { struct file *files[2]; int error = __do_pipe_flags(fd, files, flags); if (!error) { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } return error; } /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. 
*/ SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags) { struct file *files[2]; int fd[2]; int error; error = __do_pipe_flags(fd, files, flags); if (!error) { if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) { fput(files[0]); fput(files[1]); put_unused_fd(fd[0]); put_unused_fd(fd[1]); error = -EFAULT; } else { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } } return error; } SYSCALL_DEFINE1(pipe, int __user *, fildes) { return sys_pipe2(fildes, 0); } static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(pipe); if (signal_pending(current)) break; } return cur == *cnt ? -ERESTARTSYS : 0; } static void wake_up_partner(struct pipe_inode_info *pipe) { wake_up_interruptible(&pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; int ret; filp->f_version = 0; spin_lock(&inode->i_lock); if (inode->i_pipe) { pipe = inode->i_pipe; pipe->files++; spin_unlock(&inode->i_lock); } else { spin_unlock(&inode->i_lock); pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; pipe->files = 1; spin_lock(&inode->i_lock); if (unlikely(inode->i_pipe)) { inode->i_pipe->files++; spin_unlock(&inode->i_lock); free_pipe_info(pipe); pipe = inode->i_pipe; } else { inode->i_pipe = pipe; spin_unlock(&inode->i_lock); } } filp->private_data = pipe; /* OK, we have a pipe and it's pinned down */ __pipe_lock(pipe); /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(pipe); if (!is_pipe && !pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress POLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { if (wait_for_partner(pipe, &pipe->w_counter)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. */ ret = -ENXIO; if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(pipe); if (!is_pipe && !pipe->readers) { if (wait_for_partner(pipe, &pipe->r_counter)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. * This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(pipe); break; default: ret = -EINVAL; goto err; } /* Ok! */ __pipe_unlock(pipe); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: __pipe_unlock(pipe); put_pipe_info(inode, pipe); return ret; } const struct file_operations pipefifo_fops = { .open = fifo_open, .llseek = no_llseek, .read_iter = pipe_read, .write_iter = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .release = pipe_release, .fasync = pipe_fasync, }; /* * Allocate a new array of pipe buffers and copy the info over. Returns the * pipe size if successful, or return -ERROR on error. 
*/ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) { struct pipe_buffer *bufs; /* * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't * expect a lot of shrink+grow operations, just free and allocate * again like we would do for growing. If the pipe currently * contains more buffers than arg, then return busy. */ if (nr_pages < pipe->nrbufs) return -EBUSY; bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN); if (unlikely(!bufs)) return -ENOMEM; /* * The pipe array wraps around, so just start the new one at zero * and adjust the indexes. */ if (pipe->nrbufs) { unsigned int tail; unsigned int head; tail = pipe->curbuf + pipe->nrbufs; if (tail < pipe->buffers) tail = 0; else tail &= (pipe->buffers - 1); head = pipe->nrbufs - tail; if (head) memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer)); if (tail) memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); } account_pipe_buffers(pipe, pipe->buffers, nr_pages); pipe->curbuf = 0; kfree(pipe->bufs); pipe->bufs = bufs; pipe->buffers = nr_pages; return nr_pages * PAGE_SIZE; } /* * Currently we rely on the pipe array holding a power-of-2 number * of pages. */ static inline unsigned int round_pipe_size(unsigned int size) { unsigned long nr_pages; nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; } /* * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax * will return an error. */ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); if (ret < 0 || !write) return ret; pipe_max_size = round_pipe_size(pipe_max_size); return ret; } /* * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same * location, so checking ->i_pipe is not enough to verify that this is a * pipe. */ struct pipe_inode_info *get_pipe_info(struct file *file) { return file->f_op == &pipefifo_fops ? file->private_data : NULL; } long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; __pipe_lock(pipe); switch (cmd) { case F_SETPIPE_SZ: { unsigned int size, nr_pages; size = round_pipe_size(arg); nr_pages = size >> PAGE_SHIFT; ret = -EINVAL; if (!nr_pages) goto out; if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { ret = -EPERM; goto out; } else if ((too_many_pipe_buffers_hard(pipe->user) || too_many_pipe_buffers_soft(pipe->user)) && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out; } ret = pipe_set_size(pipe, nr_pages); break; } case F_GETPIPE_SZ: ret = pipe->buffers * PAGE_SIZE; break; default: ret = -EINVAL; break; } out: __pipe_unlock(pipe); return ret; } static const struct super_operations pipefs_ops = { .destroy_inode = free_inode_nonrcu, .statfs = simple_statfs, }; /* * pipefs should _never_ be mounted by userland - too much of security hassle, * no real gain from having the whole whorehouse mounted. So we don't need * any operations on the root directory. However, we need a non-trivial * d_name - pipe: will go nicely and kill the special-casing in procfs. 
*/ static struct dentry *pipefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "pipe:", &pipefs_ops, &pipefs_dentry_operations, PIPEFS_MAGIC); } static struct file_system_type pipe_fs_type = { .name = "pipefs", .mount = pipefs_mount, .kill_sb = kill_anon_super, }; static int __init init_pipe_fs(void) { int err = register_filesystem(&pipe_fs_type); if (!err) { pipe_mnt = kern_mount(&pipe_fs_type); if (IS_ERR(pipe_mnt)) { err = PTR_ERR(pipe_mnt); unregister_filesystem(&pipe_fs_type); } } return err; } fs_initcall(init_pipe_fs);
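/*
 * A minimal userspace sketch (not part of fs/pipe.c itself) showing the
 * fcntl() interface handled by pipe_fcntl() above. F_SETPIPE_SZ requests a
 * new capacity, which round_pipe_size()/pipe_set_size() round up to a
 * power-of-two number of pages before returning the granted size; the
 * 40000-byte request below is only an example value.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	/* Default capacity is PIPE_DEF_BUFFERS (16) pages, i.e. 65536 bytes
	 * on a system with 4 KiB pages. */
	printf("default capacity: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));

	/* Ask for 40000 bytes; the kernel rounds this up to the next
	 * power-of-two page count (64 KiB here), subject to pipe-max-size
	 * and the per-user accounting checks seen in pipe_fcntl(). */
	long newsz = fcntl(fds[1], F_SETPIPE_SZ, 40000);
	if (newsz < 0)
		perror("F_SETPIPE_SZ");
	else
		printf("granted capacity: %ld bytes\n", newsz);

	close(fds[0]);
	close(fds[1]);
	return 0;
}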
./CrossVul/dataset_final_sorted/CWE-399/c/good_4984_1
crossvul-cpp_data_good_5863_0
/* * Network Block Device - server * * Copyright 1996-1998 Pavel Machek, distribute under GPL * <pavel@atrey.karlin.mff.cuni.cz> * Copyright 2001-2004 Wouter Verhelst <wouter@debian.org> * Copyright 2002 Anton Altaparmakov <aia21@cam.ac.uk> * * Version 1.0 - hopefully 64-bit-clean * Version 1.1 - merging enhancements from Josh Parsons, <josh@coombs.anu.edu.au> * Version 1.2 - autodetect size of block devices, thanx to Peter T. Breuer" <ptb@it.uc3m.es> * Version 1.5 - can compile on Unix systems that don't have 64 bit integer * type, or don't have 64 bit file offsets by defining FS_32BIT * in compile options for nbd-server *only*. This can be done * with make FSCHOICE=-DFS_32BIT nbd-server. (I don't have the * original autoconf input file, or I would make it a configure * option.) Ken Yap <ken@nlc.net.au>. * Version 1.6 - fix autodetection of block device size and really make 64 bit * clean on 32 bit machines. Anton Altaparmakov <aia21@cam.ac.uk> * Version 2.0 - Version synchronised with client * Version 2.1 - Reap zombie client processes when they exit. Removed * (uncommented) the _IO magic, it's no longer necessary. Wouter * Verhelst <wouter@debian.org> * Version 2.2 - Auto switch to read-only mode (usefull for floppies). * Version 2.3 - Fixed code so that Large File Support works. This * removes the FS_32BIT compile-time directive; define * _FILE_OFFSET_BITS=64 and _LARGEFILE_SOURCE if you used to be * using FS_32BIT. This will allow you to use files >2GB instead of * having to use the -m option. Wouter Verhelst <wouter@debian.org> * Version 2.4 - Added code to keep track of children, so that we can * properly kill them from initscripts. Add a call to daemon(), * so that processes don't think they have to wait for us, which is * interesting for initscripts as well. Wouter Verhelst * <wouter@debian.org> * Version 2.5 - Bugfix release: forgot to reset child_arraysize to * zero after fork()ing, resulting in nbd-server going berserk * when it receives a signal with at least one child open. Wouter * Verhelst <wouter@debian.org> * 10/10/2003 - Added socket option SO_KEEPALIVE (sf.net bug 819235); * rectified type of mainloop::size_host (sf.net bugs 814435 and * 817385); close the PID file after writing to it, so that the * daemon can actually be found. Wouter Verhelst * <wouter@debian.org> * 10/10/2003 - Size of the data "size_host" was wrong and so was not * correctly put in network endianness. Many types were corrected * (size_t and off_t instead of int). <vspaceg@sourceforge.net> * Version 2.6 - Some code cleanup. * Version 2.7 - Better build system. * 11/02/2004 - Doxygenified the source, modularized it a bit. Needs a * lot more work, but this is a start. Wouter Verhelst * <wouter@debian.org> * 16/03/2010 - Add IPv6 support. 
* Kitt Tientanopajai <kitt@kitty.in.th> * Neutron Soutmun <neo.neutron@gmail.com> * Suriya Soutmun <darksolar@gmail.com> */ /* Includes LFS defines, which defines behaviours of some of the following * headers, so must come before those */ #include "lfs.h" #include <assert.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/select.h> #include <sys/wait.h> #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #include <sys/param.h> #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #include <signal.h> #include <errno.h> #include <netinet/tcp.h> #include <netinet/in.h> #include <netdb.h> #include <syslog.h> #include <unistd.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #if HAVE_FALLOC_PH #include <linux/falloc.h> #endif #include <arpa/inet.h> #include <strings.h> #include <dirent.h> #include <unistd.h> #include <getopt.h> #include <pwd.h> #include <grp.h> #include <dirent.h> #include <glib.h> /* used in cliserv.h, so must come first */ #define MY_NAME "nbd_server" #include "cliserv.h" #include "netdb-compat.h" #ifdef WITH_SDP #include <sdp_inet.h> #endif /** Default position of the config file */ #ifndef SYSCONFDIR #define SYSCONFDIR "/etc" #endif #define CFILE SYSCONFDIR "/nbd-server/config" /** Where our config file actually is */ gchar* config_file_pos; /** global flags */ int glob_flags=0; /* Whether we should avoid forking */ int dontfork = 0; /** Logging macros, now nothing goes to syslog unless you say ISSERVER */ #ifdef ISSERVER #define msg(prio, ...) syslog(prio, __VA_ARGS__) #else #define msg(prio, ...) g_log(G_LOG_DOMAIN, G_LOG_LEVEL_MESSAGE, __VA_ARGS__) #endif /* Debugging macros */ //#define DODBG #ifdef DODBG #define DEBUG(...) printf(__VA_ARGS__) #else #define DEBUG(...) #endif #ifndef PACKAGE_VERSION #define PACKAGE_VERSION "" #endif /** * The highest value a variable of type off_t can reach. This is a signed * integer, so set all bits except for the leftmost one. 
**/ #define OFFT_MAX ~((off_t)1<<(sizeof(off_t)*8-1)) #define LINELEN 256 /**< Size of static buffer used to read the authorization file (yuck) */ #define BUFSIZE ((1024*1024)+sizeof(struct nbd_reply)) /**< Size of buffer that can hold requests */ #define DIFFPAGESIZE 4096 /**< diff file uses those chunks */ /** Per-export flags: */ #define F_READONLY 1 /**< flag to tell us a file is readonly */ #define F_MULTIFILE 2 /**< flag to tell us a file is exported using -m */ #define F_COPYONWRITE 4 /**< flag to tell us a file is exported using copyonwrite */ #define F_AUTOREADONLY 8 /**< flag to tell us a file is set to autoreadonly */ #define F_SPARSE 16 /**< flag to tell us copyronwrite should use a sparse file */ #define F_SDP 32 /**< flag to tell us the export should be done using the Socket Direct Protocol for RDMA */ #define F_SYNC 64 /**< Whether to fsync() after a write */ #define F_FLUSH 128 /**< Whether server wants FLUSH to be sent by the client */ #define F_FUA 256 /**< Whether server wants FUA to be sent by the client */ #define F_ROTATIONAL 512 /**< Whether server wants the client to implement the elevator algorithm */ #define F_TEMPORARY 1024 /**< Whether the backing file is temporary and should be created then unlinked */ #define F_TRIM 2048 /**< Whether server wants TRIM (discard) to be sent by the client */ #define F_FIXED 4096 /**< Client supports fixed new-style protocol (and can thus send us extra options */ /** Global flags: */ #define F_OLDSTYLE 1 /**< Allow oldstyle (port-based) exports */ #define F_LIST 2 /**< Allow clients to list the exports on a server */ GHashTable *children; char pidfname[256]; /**< name of our PID file */ char pidftemplate[256]; /**< template to be used for the filename of the PID file */ char default_authname[] = SYSCONFDIR "/nbd-server/allow"; /**< default name of allow file */ #define NEG_INIT (1 << 0) #define NEG_OLD (1 << 1) #define NEG_MODERN (1 << 2) static volatile sig_atomic_t is_sighup_caught; /**< Flag set by SIGHUP handler to mark a reconfiguration request */ GArray* modernsocks; /**< Sockets for the modern handler. Not used if a client was only specified on the command line; only port used if oldstyle is set to false (and then the command-line client isn't used, gna gna). This may be more than one socket on systems that don't support serving IPv4 and IPv6 from the same socket (like, e.g., FreeBSD) */ bool logged_oversized=false; /**< whether we logged oversized requests already */ /** * Types of virtuatlization **/ typedef enum { VIRT_NONE=0, /**< No virtualization */ VIRT_IPLIT, /**< Literal IP address as part of the filename */ VIRT_IPHASH, /**< Replacing all dots in an ip address by a / before doing the same as in IPLIT */ VIRT_CIDR, /**< Every subnet in its own directory */ } VIRT_STYLE; /** * Variables associated with a server. **/ typedef struct { gchar* exportname; /**< (unprocessed) filename of the file we're exporting */ off_t expected_size; /**< size of the exported file as it was told to us through configuration */ gchar* listenaddr; /**< The IP address we're listening on */ unsigned int port; /**< port we're exporting this file at */ char* authname; /**< filename of the authorization file */ int flags; /**< flags associated with this exported file */ int socket; /**< The socket of this server. 
*/ int socket_family; /**< family of the socket */ VIRT_STYLE virtstyle;/**< The style of virtualization, if any */ uint8_t cidrlen; /**< The length of the mask when we use CIDR-style virtualization */ gchar* prerun; /**< command to be ran after connecting a client, but before starting to serve */ gchar* postrun; /**< command that will be ran after the client disconnects */ gchar* servename; /**< name of the export as selected by nbd-client */ int max_connections; /**< maximum number of opened connections */ gchar* transactionlog;/**< filename for transaction log */ } SERVER; /** * Variables associated with a client socket. **/ typedef struct { int fhandle; /**< file descriptor */ off_t startoff; /**< starting offset of this file */ } FILE_INFO; typedef struct { off_t exportsize; /**< size of the file we're exporting */ char *clientname; /**< peer */ char *exportname; /**< (processed) filename of the file we're exporting */ GArray *export; /**< array of FILE_INFO of exported files; array size is always 1 unless we're doing the multiple file option */ int net; /**< The actual client socket */ SERVER *server; /**< The server this client is getting data from */ char* difffilename; /**< filename of the copy-on-write file, if any */ int difffile; /**< filedescriptor of copyonwrite file. @todo shouldn't this be an array too? (cfr export) Or make -m and -c mutually exclusive */ u32 difffilelen; /**< number of pages in difffile */ u32 *difmap; /**< see comment on the global difmap for this one */ gboolean modern; /**< client was negotiated using modern negotiation protocol */ int transactionlogfd;/**< fd for transaction log */ int clientfeats; /**< Features supported by this client */ } CLIENT; /** * Type of configuration file values **/ typedef enum { PARAM_INT, /**< This parameter is an integer */ PARAM_INT64, /**< This parameter is an integer */ PARAM_STRING, /**< This parameter is a string */ PARAM_BOOL, /**< This parameter is a boolean */ } PARAM_TYPE; /** * Configuration file values **/ typedef struct { gchar *paramname; /**< Name of the parameter, as it appears in the config file */ gboolean required; /**< Whether this is a required (as opposed to optional) parameter */ PARAM_TYPE ptype; /**< Type of the parameter. */ gpointer target; /**< Pointer to where the data of this parameter should be written. If ptype is PARAM_BOOL, the data is or'ed rather than overwritten. */ gint flagval; /**< Flag mask for this parameter in case ptype is PARAM_BOOL. */ } PARAM; /** * Configuration file values of the "generic" section **/ struct generic_conf { gchar *user; /**< user we run the server as */ gchar *group; /**< group we run running as */ gchar *modernaddr; /**< address of the modern socket */ gchar *modernport; /**< port of the modern socket */ gint flags; /**< global flags */ }; /** * Translate a command name into human readable form * * @param command The command number (after applying NBD_CMD_MASK_COMMAND) * @return pointer to the command name **/ static inline const char * getcommandname(uint64_t command) { switch (command) { case NBD_CMD_READ: return "NBD_CMD_READ"; case NBD_CMD_WRITE: return "NBD_CMD_WRITE"; case NBD_CMD_DISC: return "NBD_CMD_DISC"; case NBD_CMD_FLUSH: return "NBD_CMD_FLUSH"; case NBD_CMD_TRIM: return "NBD_CMD_TRIM"; default: return "UNKNOWN"; } } /** * Check whether a client is allowed to connect. Works with an authorization * file which contains one line per machine, no wildcards. * * @param opts The client who's trying to connect. 
* @return 0 - authorization refused, 1 - OK **/ int authorized_client(CLIENT *opts) { const char *ERRMSG="Invalid entry '%s' in authfile '%s', so, refusing all connections."; FILE *f ; char line[LINELEN]; char *tmp; struct in_addr addr; struct in_addr client; struct in_addr cltemp; int len; if ((f=fopen(opts->server->authname,"r"))==NULL) { msg(LOG_INFO, "Can't open authorization file %s (%s).", opts->server->authname, strerror(errno)); return 1 ; } inet_aton(opts->clientname, &client); while (fgets(line,LINELEN,f)!=NULL) { if((tmp=strchr(line, '/'))) { if(strlen(line)<=tmp-line) { msg(LOG_CRIT, ERRMSG, line, opts->server->authname); return 0; } *(tmp++)=0; if(!inet_aton(line,&addr)) { msg(LOG_CRIT, ERRMSG, line, opts->server->authname); return 0; } len=strtol(tmp, NULL, 0); addr.s_addr>>=32-len; addr.s_addr<<=32-len; memcpy(&cltemp,&client,sizeof(client)); cltemp.s_addr>>=32-len; cltemp.s_addr<<=32-len; if(addr.s_addr == cltemp.s_addr) { return 1; } } if (strncmp(line,opts->clientname,strlen(opts->clientname))==0) { fclose(f); return 1; } } fclose(f); return 0; } /** * Read data from a file descriptor into a buffer * * @param f a file descriptor * @param buf a buffer * @param len the number of bytes to be read **/ static inline void readit(int f, void *buf, size_t len) { ssize_t res; while (len > 0) { DEBUG("*"); if ((res = read(f, buf, len)) <= 0) { if(errno != EAGAIN) { err("Read failed: %m"); } } else { len -= res; buf += res; } } } /** * Consume data from an FD that we don't want * * @param f a file descriptor * @param buf a buffer * @param len the number of bytes to consume * @param bufsiz the size of the buffer **/ static inline void consume(int f, void * buf, size_t len, size_t bufsiz) { size_t curlen; while (len>0) { curlen = (len>bufsiz)?bufsiz:len; readit(f, buf, curlen); len -= curlen; } } /** * Write data from a buffer into a filedescriptor * * @param f a file descriptor * @param buf a buffer containing data * @param len the number of bytes to be written **/ static inline void writeit(int f, void *buf, size_t len) { ssize_t res; while (len > 0) { DEBUG("+"); if ((res = write(f, buf, len)) <= 0) err("Send failed: %m"); len -= res; buf += res; } } /** * Print out a message about how to use nbd-server. 
Split out to a separate * function so that we can call it from multiple places */ void usage() { printf("This is nbd-server version " VERSION "\n"); printf("Usage: [ip:|ip6@]port file_to_export [size][kKmM] [-l authorize_file] [-r] [-m] [-c] [-C configuration file] [-p PID file name] [-o section name] [-M max connections]\n" "\t-r|--read-only\t\tread only\n" "\t-m|--multi-file\t\tmultiple file\n" "\t-c|--copy-on-write\tcopy on write\n" "\t-C|--config-file\tspecify an alternate configuration file\n" "\t-l|--authorize-file\tfile with list of hosts that are allowed to\n\t\t\t\tconnect.\n" "\t-p|--pid-file\t\tspecify a filename to write our PID to\n" "\t-o|--output-config\toutput a config file section for what you\n\t\t\t\tspecified on the command line, with the\n\t\t\t\tspecified section name\n" "\t-M|--max-connections\tspecify the maximum number of opened connections\n\n" "\tif port is set to 0, stdin is used (for running from inetd).\n" "\tif file_to_export contains '%%s', it is substituted with the IP\n" "\t\taddress of the machine trying to connect\n" "\tif ip is set, it contains the local IP address on which we're listening.\n\tif not, the server will listen on all local IP addresses\n"); printf("Using configuration file %s\n", CFILE); } /* Dumps a config file section of the given SERVER*, and exits. */ void dump_section(SERVER* serve, gchar* section_header) { printf("[%s]\n", section_header); printf("\texportname = %s\n", serve->exportname); printf("\tlistenaddr = %s\n", serve->listenaddr); printf("\tport = %d\n", serve->port); if(serve->flags & F_READONLY) { printf("\treadonly = true\n"); } if(serve->flags & F_MULTIFILE) { printf("\tmultifile = true\n"); } if(serve->flags & F_COPYONWRITE) { printf("\tcopyonwrite = true\n"); } if(serve->expected_size) { printf("\tfilesize = %lld\n", (long long int)serve->expected_size); } if(serve->authname) { printf("\tauthfile = %s\n", serve->authname); } exit(EXIT_SUCCESS); } /** * Parse the command line. 
* * @param argc the argc argument to main() * @param argv the argv argument to main() **/ SERVER* cmdline(int argc, char *argv[]) { int i=0; int nonspecial=0; int c; struct option long_options[] = { {"read-only", no_argument, NULL, 'r'}, {"multi-file", no_argument, NULL, 'm'}, {"copy-on-write", no_argument, NULL, 'c'}, {"dont-fork", no_argument, NULL, 'd'}, {"authorize-file", required_argument, NULL, 'l'}, {"config-file", required_argument, NULL, 'C'}, {"pid-file", required_argument, NULL, 'p'}, {"output-config", required_argument, NULL, 'o'}, {"max-connection", required_argument, NULL, 'M'}, {0,0,0,0} }; SERVER *serve; off_t es; size_t last; char suffix; gboolean do_output=FALSE; gchar* section_header=""; gchar** addr_port; if(argc==1) { return NULL; } serve=g_new0(SERVER, 1); serve->authname = g_strdup(default_authname); serve->virtstyle=VIRT_IPLIT; while((c=getopt_long(argc, argv, "-C:cdl:mo:rp:M:", long_options, &i))>=0) { switch (c) { case 1: /* non-option argument */ switch(nonspecial++) { case 0: if(strchr(optarg, ':') == strrchr(optarg, ':')) { addr_port=g_strsplit(optarg, ":", 2); /* Check for "@" - maybe user using this separator for IPv4 address */ if(!addr_port[1]) { g_strfreev(addr_port); addr_port=g_strsplit(optarg, "@", 2); } } else { addr_port=g_strsplit(optarg, "@", 2); } if(addr_port[1]) { serve->port=strtol(addr_port[1], NULL, 0); serve->listenaddr=g_strdup(addr_port[0]); } else { serve->listenaddr=NULL; serve->port=strtol(addr_port[0], NULL, 0); } g_strfreev(addr_port); break; case 1: serve->exportname = g_strdup(optarg); if(serve->exportname[0] != '/') { fprintf(stderr, "E: The to be exported file needs to be an absolute filename!\n"); exit(EXIT_FAILURE); } break; case 2: last=strlen(optarg)-1; suffix=optarg[last]; if (suffix == 'k' || suffix == 'K' || suffix == 'm' || suffix == 'M') optarg[last] = '\0'; es = (off_t)atoll(optarg); switch (suffix) { case 'm': case 'M': es <<= 10; case 'k': case 'K': es <<= 10; default : break; } serve->expected_size = es; break; } break; case 'r': serve->flags |= F_READONLY; break; case 'm': serve->flags |= F_MULTIFILE; break; case 'o': do_output = TRUE; section_header = g_strdup(optarg); break; case 'p': strncpy(pidftemplate, optarg, 256); break; case 'c': serve->flags |=F_COPYONWRITE; break; case 'd': dontfork = 1; break; case 'C': g_free(config_file_pos); config_file_pos=g_strdup(optarg); break; case 'l': g_free(serve->authname); serve->authname=g_strdup(optarg); break; case 'M': serve->max_connections = strtol(optarg, NULL, 0); break; default: usage(); exit(EXIT_FAILURE); break; } } /* What's left: the port to export, the name of the to be exported * file, and, optionally, the size of the file, in that order. */ if(nonspecial<2) { g_free(serve); serve=NULL; } else { glob_flags |= F_OLDSTYLE; } if(do_output) { if(!serve) { g_critical("Need a complete configuration on the command line to output a config file section!"); exit(EXIT_FAILURE); } dump_section(serve, section_header); } return serve; } /** * Error domain common for all NBD server errors. **/ #define NBDS_ERR g_quark_from_static_string("server-error-quark") /** * NBD server error codes. 
**/ typedef enum { NBDS_ERR_CFILE_NOTFOUND, /**< The configuration file is not found */ NBDS_ERR_CFILE_MISSING_GENERIC, /**< The (required) group "generic" is missing */ NBDS_ERR_CFILE_KEY_MISSING, /**< A (required) key is missing */ NBDS_ERR_CFILE_VALUE_INVALID, /**< A value is syntactically invalid */ NBDS_ERR_CFILE_VALUE_UNSUPPORTED, /**< A value is not supported in this build */ NBDS_ERR_CFILE_NO_EXPORTS, /**< A config file was specified that does not define any exports */ NBDS_ERR_CFILE_INCORRECT_PORT, /**< The reserved port was specified for an old-style export. */ NBDS_ERR_CFILE_DIR_UNKNOWN, /**< A directory requested does not exist*/ NBDS_ERR_CFILE_READDIR_ERR, /**< Error occurred during readdir() */ NBDS_ERR_SO_LINGER, /**< Failed to set SO_LINGER to a socket */ NBDS_ERR_SO_REUSEADDR, /**< Failed to set SO_REUSEADDR to a socket */ NBDS_ERR_SO_KEEPALIVE, /**< Failed to set SO_KEEPALIVE to a socket */ NBDS_ERR_GAI, /**< Failed to get address info */ NBDS_ERR_SOCKET, /**< Failed to create a socket */ NBDS_ERR_BIND, /**< Failed to bind an address to socket */ NBDS_ERR_LISTEN, /**< Failed to start listening on a socket */ NBDS_ERR_SYS, /**< Underlying system call or library error */ } NBDS_ERRS; /** * duplicate server * @param s the old server we want to duplicate * @return new duplicated server **/ SERVER* dup_serve(const SERVER *const s) { SERVER *serve = NULL; serve=g_new0(SERVER, 1); if(serve == NULL) return NULL; if(s->exportname) serve->exportname = g_strdup(s->exportname); serve->expected_size = s->expected_size; if(s->listenaddr) serve->listenaddr = g_strdup(s->listenaddr); serve->port = s->port; if(s->authname) serve->authname = strdup(s->authname); serve->flags = s->flags; serve->socket = s->socket; serve->socket_family = s->socket_family; serve->virtstyle = s->virtstyle; serve->cidrlen = s->cidrlen; if(s->prerun) serve->prerun = g_strdup(s->prerun); if(s->postrun) serve->postrun = g_strdup(s->postrun); if(s->transactionlog) serve->transactionlog = g_strdup(s->transactionlog); if(s->servename) serve->servename = g_strdup(s->servename); serve->max_connections = s->max_connections; return serve; } /** * append new server to array * @param s server * @param a server array * @return 0 success, -1 error */ int append_serve(const SERVER *const s, GArray *const a) { SERVER *ns = NULL; struct addrinfo hints; struct addrinfo *ai = NULL; struct addrinfo *rp = NULL; char host[NI_MAXHOST]; gchar *port = NULL; int e; int ret; assert(s != NULL); port = g_strdup_printf("%d", s->port); memset(&hints,'\0',sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE; hints.ai_protocol = IPPROTO_TCP; e = getaddrinfo(s->listenaddr, port, &hints, &ai); if (port) g_free(port); if(e == 0) { for (rp = ai; rp != NULL; rp = rp->ai_next) { e = getnameinfo(rp->ai_addr, rp->ai_addrlen, host, sizeof(host), NULL, 0, NI_NUMERICHOST); if (e != 0) { // error fprintf(stderr, "getnameinfo: %s\n", gai_strerror(e)); continue; } // duplicate server and set listenaddr to resolved IP address ns = dup_serve (s); if (ns) { ns->listenaddr = g_strdup(host); ns->socket_family = rp->ai_family; g_array_append_val(a, *ns); free(ns); ns = NULL; } } ret = 0; } else { fprintf(stderr, "getaddrinfo failed on listen host/address: %s (%s)\n", s->listenaddr ? 
s->listenaddr : "any", gai_strerror(e)); ret = -1; } if (ai) freeaddrinfo(ai); return ret; } /* forward definition of parse_cfile */ GArray* parse_cfile(gchar* f, struct generic_conf *genconf, GError** e); /** * Parse config file snippets in a directory. Uses readdir() and friends * to find files and open them, then passes them on to parse_cfile * with genconf set to NULL **/ GArray* do_cfile_dir(gchar* dir, GError** e) { DIR* dirh = opendir(dir); struct dirent* de; gchar* fname; GArray* retval = NULL; GArray* tmp; struct stat stbuf; if(!dirh) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_DIR_UNKNOWN, "Invalid directory specified: %s", strerror(errno)); return NULL; } errno=0; while((de = readdir(dirh))) { int saved_errno=errno; fname = g_build_filename(dir, de->d_name, NULL); switch(de->d_type) { case DT_UNKNOWN: /* Filesystem doesn't return type of * file through readdir. Run stat() on * the file instead */ if(stat(fname, &stbuf)) { perror("stat"); goto err_out; } if (!S_ISREG(stbuf.st_mode)) { goto next; } case DT_REG: /* Skip unless the name ends with '.conf' */ if(strcmp((de->d_name + strlen(de->d_name) - 5), ".conf")) { goto next; } tmp = parse_cfile(fname, NULL, e); errno=saved_errno; if(*e) { goto err_out; } if(!retval) retval = g_array_new(FALSE, TRUE, sizeof(SERVER)); retval = g_array_append_vals(retval, tmp->data, tmp->len); g_array_free(tmp, TRUE); default: break; } next: g_free(fname); } if(errno) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_READDIR_ERR, "Error trying to read directory: %s", strerror(errno)); err_out: if(retval) g_array_free(retval, TRUE); return NULL; } return retval; } /** * Parse the config file. * * @param f the name of the config file * * @param genconf a pointer to generic configuration which will get * updated with parsed values. If NULL, then parsed generic * configuration values are safely and silently discarded. * * @param e a GError. Error code can be any of the following: * NBDS_ERR_CFILE_NOTFOUND, NBDS_ERR_CFILE_MISSING_GENERIC, * NBDS_ERR_CFILE_VALUE_INVALID, NBDS_ERR_CFILE_VALUE_UNSUPPORTED * or NBDS_ERR_CFILE_NO_EXPORTS. @see NBDS_ERRS.
* * @return a Array of SERVER* pointers, If the config file is empty or does not * exist, returns an empty GHashTable; if the config file contains an * error, returns NULL, and e is set appropriately **/ GArray* parse_cfile(gchar* f, struct generic_conf *const genconf, GError** e) { const char* DEFAULT_ERROR = "Could not parse %s in group %s: %s"; const char* MISSING_REQUIRED_ERROR = "Could not find required value %s in group %s: %s"; gchar* cfdir = NULL; SERVER s; gchar *virtstyle=NULL; PARAM lp[] = { { "exportname", TRUE, PARAM_STRING, &(s.exportname), 0 }, { "port", TRUE, PARAM_INT, &(s.port), 0 }, { "authfile", FALSE, PARAM_STRING, &(s.authname), 0 }, { "filesize", FALSE, PARAM_OFFT, &(s.expected_size), 0 }, { "virtstyle", FALSE, PARAM_STRING, &(virtstyle), 0 }, { "prerun", FALSE, PARAM_STRING, &(s.prerun), 0 }, { "postrun", FALSE, PARAM_STRING, &(s.postrun), 0 }, { "transactionlog", FALSE, PARAM_STRING, &(s.transactionlog), 0 }, { "readonly", FALSE, PARAM_BOOL, &(s.flags), F_READONLY }, { "multifile", FALSE, PARAM_BOOL, &(s.flags), F_MULTIFILE }, { "copyonwrite", FALSE, PARAM_BOOL, &(s.flags), F_COPYONWRITE }, { "sparse_cow", FALSE, PARAM_BOOL, &(s.flags), F_SPARSE }, { "sdp", FALSE, PARAM_BOOL, &(s.flags), F_SDP }, { "sync", FALSE, PARAM_BOOL, &(s.flags), F_SYNC }, { "flush", FALSE, PARAM_BOOL, &(s.flags), F_FLUSH }, { "fua", FALSE, PARAM_BOOL, &(s.flags), F_FUA }, { "rotational", FALSE, PARAM_BOOL, &(s.flags), F_ROTATIONAL }, { "temporary", FALSE, PARAM_BOOL, &(s.flags), F_TEMPORARY }, { "trim", FALSE, PARAM_BOOL, &(s.flags), F_TRIM }, { "listenaddr", FALSE, PARAM_STRING, &(s.listenaddr), 0 }, { "maxconnections", FALSE, PARAM_INT, &(s.max_connections), 0 }, }; const int lp_size=sizeof(lp)/sizeof(PARAM); struct generic_conf genconftmp; PARAM gp[] = { { "user", FALSE, PARAM_STRING, &(genconftmp.user), 0 }, { "group", FALSE, PARAM_STRING, &(genconftmp.group), 0 }, { "oldstyle", FALSE, PARAM_BOOL, &(genconftmp.flags), F_OLDSTYLE }, { "listenaddr", FALSE, PARAM_STRING, &(genconftmp.modernaddr), 0 }, { "port", FALSE, PARAM_STRING, &(genconftmp.modernport), 0 }, { "includedir", FALSE, PARAM_STRING, &cfdir, 0 }, { "allowlist", FALSE, PARAM_BOOL, &(genconftmp.flags), F_LIST }, }; PARAM* p=gp; int p_size=sizeof(gp)/sizeof(PARAM); GKeyFile *cfile; GError *err = NULL; const char *err_msg=NULL; GArray *retval=NULL; gchar **groups; gboolean bval; gint ival; gint64 i64val; gchar* sval; gchar* startgroup; gint i; gint j; memset(&genconftmp, 0, sizeof(struct generic_conf)); if (genconf) { /* Use the passed configuration values as defaults. The * parsing algorithm below updates all parameter targets * found from configuration files. 
*/ memcpy(&genconftmp, genconf, sizeof(struct generic_conf)); } cfile = g_key_file_new(); retval = g_array_new(FALSE, TRUE, sizeof(SERVER)); if(!g_key_file_load_from_file(cfile, f, G_KEY_FILE_KEEP_COMMENTS | G_KEY_FILE_KEEP_TRANSLATIONS, &err)) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_NOTFOUND, "Could not open config file %s: %s", f, err->message); g_key_file_free(cfile); return retval; } startgroup = g_key_file_get_start_group(cfile); if((!startgroup || strcmp(startgroup, "generic")) && genconf) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_MISSING_GENERIC, "Config file does not contain the [generic] group!"); g_key_file_free(cfile); return NULL; } groups = g_key_file_get_groups(cfile, NULL); for(i=0;groups[i];i++) { memset(&s, '\0', sizeof(SERVER)); /* After the [generic] group or when we're parsing an include * directory, start parsing exports */ if(i==1 || !genconf) { p=lp; p_size=lp_size; if(!(glob_flags & F_OLDSTYLE)) { lp[1].required = FALSE; } } for(j=0;j<p_size;j++) { assert(p[j].target != NULL); assert(p[j].ptype==PARAM_INT||p[j].ptype==PARAM_STRING||p[j].ptype==PARAM_BOOL||p[j].ptype==PARAM_INT64); switch(p[j].ptype) { case PARAM_INT: ival = g_key_file_get_integer(cfile, groups[i], p[j].paramname, &err); if(!err) { *((gint*)p[j].target) = ival; } break; case PARAM_INT64: i64val = g_key_file_get_int64(cfile, groups[i], p[j].paramname, &err); if(!err) { *((gint64*)p[j].target) = i64val; } break; case PARAM_STRING: sval = g_key_file_get_string(cfile, groups[i], p[j].paramname, &err); if(!err) { *((gchar**)p[j].target) = sval; } break; case PARAM_BOOL: bval = g_key_file_get_boolean(cfile, groups[i], p[j].paramname, &err); if(!err) { if(bval) { *((gint*)p[j].target) |= p[j].flagval; } else { *((gint*)p[j].target) &= ~(p[j].flagval); } } break; } if(err) { if(err->code == G_KEY_FILE_ERROR_KEY_NOT_FOUND) { if(!p[j].required) { /* Ignore not-found error for optional values */ g_clear_error(&err); continue; } else { err_msg = MISSING_REQUIRED_ERROR; } } else { err_msg = DEFAULT_ERROR; } g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_VALUE_INVALID, err_msg, p[j].paramname, groups[i], err->message); g_array_free(retval, TRUE); g_error_free(err); g_key_file_free(cfile); return NULL; } } if(virtstyle) { if(!strncmp(virtstyle, "none", 4)) { s.virtstyle=VIRT_NONE; } else if(!strncmp(virtstyle, "ipliteral", 9)) { s.virtstyle=VIRT_IPLIT; } else if(!strncmp(virtstyle, "iphash", 6)) { s.virtstyle=VIRT_IPHASH; } else if(!strncmp(virtstyle, "cidrhash", 8)) { s.virtstyle=VIRT_CIDR; if(strlen(virtstyle)<10) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_VALUE_INVALID, "Invalid value %s for parameter virtstyle in group %s: missing length", virtstyle, groups[i]); g_array_free(retval, TRUE); g_key_file_free(cfile); return NULL; } s.cidrlen=strtol(virtstyle+8, NULL, 0); } else { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_VALUE_INVALID, "Invalid value %s for parameter virtstyle in group %s", virtstyle, groups[i]); g_array_free(retval, TRUE); g_key_file_free(cfile); return NULL; } } else { s.virtstyle=VIRT_IPLIT; } if(s.port && !(glob_flags & F_OLDSTYLE)) { g_warning("A port was specified, but oldstyle exports were not requested. 
This may not do what you expect."); g_warning("Please read 'man 5 nbd-server' and search for oldstyle for more info"); } /* Don't need to free this, it's not our string */ virtstyle=NULL; /* Don't append values for the [generic] group */ if(i>0 || !genconf) { s.socket_family = AF_UNSPEC; s.servename = groups[i]; append_serve(&s, retval); } #ifndef WITH_SDP if(s.flags & F_SDP) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_VALUE_UNSUPPORTED, "This nbd-server was built without support for SDP, yet group %s uses it", groups[i]); g_array_free(retval, TRUE); g_key_file_free(cfile); return NULL; } #endif } g_key_file_free(cfile); if(cfdir) { GArray* extra = do_cfile_dir(cfdir, e); if(extra) { retval = g_array_append_vals(retval, extra->data, extra->len); i+=extra->len; g_array_free(extra, TRUE); } else { if(*e) { g_array_free(retval, TRUE); return NULL; } } } if(i==1 && genconf) { g_set_error(e, NBDS_ERR, NBDS_ERR_CFILE_NO_EXPORTS, "The config file does not specify any exports"); } if (genconf) { /* Return the updated generic configuration through the * pointer parameter. */ memcpy(genconf, &genconftmp, sizeof(struct generic_conf)); } return retval; } /** * Signal handler for SIGCHLD * @param s the signal we're handling (must be SIGCHLD, or something * is severely wrong) **/ void sigchld_handler(int s) { int status; int* i; pid_t pid; while((pid=waitpid(-1, &status, WNOHANG)) > 0) { if(WIFEXITED(status)) { msg(LOG_INFO, "Child exited with %d", WEXITSTATUS(status)); } i=g_hash_table_lookup(children, &pid); if(!i) { msg(LOG_INFO, "SIGCHLD received for an unknown child with PID %ld", (long)pid); } else { DEBUG("Removing %d from the list of children", pid); g_hash_table_remove(children, &pid); } } } /** * Kill a child. Called from sigterm_handler::g_hash_table_foreach. * * @param key the key * @param value the value corresponding to the above key * @param user_data a pointer which we always set to 1, so that we know what * will happen next. **/ void killchild(gpointer key, gpointer value, gpointer user_data) { pid_t *pid=value; kill(*pid, SIGTERM); } /** * Handle SIGTERM and dispatch it to our children * @param s the signal we're handling (must be SIGTERM, or something * is severely wrong). **/ void sigterm_handler(int s) { g_hash_table_foreach(children, killchild, NULL); unlink(pidfname); exit(EXIT_SUCCESS); } /** * Handle SIGHUP by setting atomically a flag which will be evaluated in * the main loop of the root server process. This allows us to separate * the signal catching from th actual task triggered by SIGHUP and hence * processing in the interrupt context is kept as minimial as possible. * * @param s the signal we're handling (must be SIGHUP, or something * is severely wrong). **/ static void sighup_handler(const int s G_GNUC_UNUSED) { is_sighup_caught = 1; } /** * Detect the size of a file. * * @param fhandle An open filedescriptor * @return the size of the file, or OFFT_MAX if detection was * impossible. 
**/ off_t size_autodetect(int fhandle) { off_t es; u64 bytes __attribute__((unused)); struct stat stat_buf; int error; #ifdef HAVE_SYS_MOUNT_H #ifdef HAVE_SYS_IOCTL_H #ifdef BLKGETSIZE64 DEBUG("looking for export size with ioctl BLKGETSIZE64\n"); if (!ioctl(fhandle, BLKGETSIZE64, &bytes) && bytes) { return (off_t)bytes; } #endif /* BLKGETSIZE64 */ #endif /* HAVE_SYS_IOCTL_H */ #endif /* HAVE_SYS_MOUNT_H */ DEBUG("looking for fhandle size with fstat\n"); stat_buf.st_size = 0; error = fstat(fhandle, &stat_buf); if (!error) { /* always believe stat if a regular file as it might really * be zero length */ if (S_ISREG(stat_buf.st_mode) || (stat_buf.st_size > 0)) return (off_t)stat_buf.st_size; } else { err("fstat failed: %m"); } DEBUG("looking for fhandle size with lseek SEEK_END\n"); es = lseek(fhandle, (off_t)0, SEEK_END); if (es > ((off_t)0)) { return es; } else { DEBUG("lseek failed: %d", errno==EBADF?1:(errno==ESPIPE?2:(errno==EINVAL?3:4))); } err("Could not find size of exported block device: %m"); } /** * Get the file handle and offset, given an export offset. * * @param export An array of export files * @param a The offset to get corresponding file/offset for * @param fhandle [out] File descriptor * @param foffset [out] Offset into fhandle * @param maxbytes [out] Tells how many bytes can be read/written * from fhandle starting at foffset (0 if there is no limit) * @return 0 on success, -1 on failure **/ int get_filepos(GArray* export, off_t a, int* fhandle, off_t* foffset, size_t* maxbytes ) { /* Negative offset not allowed */ if(a < 0) return -1; /* Binary search for last file with starting offset <= a */ FILE_INFO fi; int start = 0; int end = export->len - 1; while( start <= end ) { int mid = (start + end) / 2; fi = g_array_index(export, FILE_INFO, mid); if( fi.startoff < a ) { start = mid + 1; } else if( fi.startoff > a ) { end = mid - 1; } else { start = end = mid; break; } } /* end should never go negative, since first startoff is 0 and a >= 0 */ assert(end >= 0); fi = g_array_index(export, FILE_INFO, end); *fhandle = fi.fhandle; *foffset = a - fi.startoff; *maxbytes = 0; if( end+1 < export->len ) { FILE_INFO fi_next = g_array_index(export, FILE_INFO, end+1); *maxbytes = fi_next.startoff - a; } return 0; } /** * seek to a position in a file, with error handling. * @param handle a filedescriptor * @param a position to seek to * @todo get rid of this; lastpoint is a global variable right now, but it * shouldn't be. If we pass it on as a parameter, that makes things a *lot* * easier. **/ void myseek(int handle,off_t a) { if (lseek(handle, a, SEEK_SET) < 0) { err("Can not seek locally!\n"); } } /** * Write an amount of bytes at a given offset to the right file. This * abstracts the write-side of the multiple file option. 
* * @param a The offset where the write should start * @param buf The buffer to write from * @param len The length of buf * @param client The client we're serving for * @param fua Flag to indicate 'Force Unit Access' * @return The number of bytes actually written, or -1 in case of an error **/ ssize_t rawexpwrite(off_t a, char *buf, size_t len, CLIENT *client, int fua) { int fhandle; off_t foffset; size_t maxbytes; ssize_t retval; if(get_filepos(client->export, a, &fhandle, &foffset, &maxbytes)) return -1; if(maxbytes && len > maxbytes) len = maxbytes; DEBUG("(WRITE to fd %d offset %llu len %u fua %d), ", fhandle, (long long unsigned)foffset, (unsigned int)len, fua); myseek(fhandle, foffset); retval = write(fhandle, buf, len); if(client->server->flags & F_SYNC) { fsync(fhandle); } else if (fua) { /* This is where we would do the following * #ifdef USE_SYNC_FILE_RANGE * However, we don't, for the reasons set out below * by Christoph Hellwig <hch@infradead.org> * * [BEGINS] * fdatasync is equivalent to fsync except that it does not flush * non-essential metadata (basically just timestamps in practice), but it * does flush metadata requried to find the data again, e.g. allocation * information and extent maps. sync_file_range does nothing but flush * out pagecache content - it means you basically won't get your data * back in case of a crash if you either: * * a) have a volatile write cache in your disk (e.g. any normal SATA disk) * b) are using a sparse file on a filesystem * c) are using a fallocate-preallocated file on a filesystem * d) use any file on a COW filesystem like btrfs * * e.g. it only does anything useful for you if you do not have a volatile * write cache, and either use a raw block device node, or just overwrite * an already fully allocated (and not preallocated) file on a non-COW * filesystem. * [ENDS] * * What we should do is open a second FD with O_DSYNC set, then write to * that when appropriate. However, with a Linux client, every REQ_FUA * immediately follows a REQ_FLUSH, so fdatasync does not cause performance * problems. * */ #if 0 sync_file_range(fhandle, foffset, len, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER); #else fdatasync(fhandle); #endif } return retval; } /** * Call rawexpwrite repeatedly until all data has been written. * * @param a The offset where the write should start * @param buf The buffer to write from * @param len The length of buf * @param client The client we're serving for * @param fua Flag to indicate 'Force Unit Access' * @return 0 on success, nonzero on failure **/ int rawexpwrite_fully(off_t a, char *buf, size_t len, CLIENT *client, int fua) { ssize_t ret=0; while(len > 0 && (ret=rawexpwrite(a, buf, len, client, fua)) > 0 ) { a += ret; buf += ret; len -= ret; } return (ret < 0 || len != 0); } /** * Read an amount of bytes at a given offset from the right file. This * abstracts the read-side of the multiple files option. * * @param a The offset where the read should start * @param buf A buffer to read into * @param len The size of buf * @param client The client we're serving for * @return The number of bytes actually read, or -1 in case of an * error. 
**/ ssize_t rawexpread(off_t a, char *buf, size_t len, CLIENT *client) { int fhandle; off_t foffset; size_t maxbytes; if(get_filepos(client->export, a, &fhandle, &foffset, &maxbytes)) return -1; if(maxbytes && len > maxbytes) len = maxbytes; DEBUG("(READ from fd %d offset %llu len %u), ", fhandle, (long long unsigned int)foffset, (unsigned int)len); myseek(fhandle, foffset); return read(fhandle, buf, len); } /** * Call rawexpread repeatedly until all data has been read. * @return 0 on success, nonzero on failure **/ int rawexpread_fully(off_t a, char *buf, size_t len, CLIENT *client) { ssize_t ret=0; while(len > 0 && (ret=rawexpread(a, buf, len, client)) > 0 ) { a += ret; buf += ret; len -= ret; } return (ret < 0 || len != 0); } /** * Read an amount of bytes at a given offset from the right file. This * abstracts the read-side of the copyonwrite stuff, and calls * rawexpread() with the right parameters to do the actual work. * @param a The offset where the read should start * @param buf A buffer to read into * @param len The size of buf * @param client The client we're going to read for * @return 0 on success, nonzero on failure **/ int expread(off_t a, char *buf, size_t len, CLIENT *client) { off_t rdlen, offset; off_t mapcnt, mapl, maph, pagestart; if (!(client->server->flags & F_COPYONWRITE)) return(rawexpread_fully(a, buf, len, client)); DEBUG("Asked to read %u bytes at %llu.\n", (unsigned int)len, (unsigned long long)a); mapl=a/DIFFPAGESIZE; maph=(a+len-1)/DIFFPAGESIZE; for (mapcnt=mapl;mapcnt<=maph;mapcnt++) { pagestart=mapcnt*DIFFPAGESIZE; offset=a-pagestart; rdlen=(0<DIFFPAGESIZE-offset && len<(size_t)(DIFFPAGESIZE-offset)) ? len : (size_t)DIFFPAGESIZE-offset; if (client->difmap[mapcnt]!=(u32)(-1)) { /* the block is already there */ DEBUG("Page %llu is at %lu\n", (unsigned long long)mapcnt, (unsigned long)(client->difmap[mapcnt])); myseek(client->difffile, client->difmap[mapcnt]*DIFFPAGESIZE+offset); if (read(client->difffile, buf, rdlen) != rdlen) return -1; } else { /* the block is not there */ DEBUG("Page %llu is not here, we read the original one\n", (unsigned long long)mapcnt); if(rawexpread_fully(a, buf, rdlen, client)) return -1; } len-=rdlen; a+=rdlen; buf+=rdlen; } return 0; } /** * Write an amount of bytes at a given offset to the right file. This * abstracts the write-side of the copyonwrite option, and calls * rawexpwrite() with the right parameters to do the actual work. * * @param a The offset where the write should start * @param buf The buffer to write from * @param len The length of buf * @param client The client we're going to write for. * @param fua Flag to indicate 'Force Unit Access' * @return 0 on success, nonzero on failure **/ int expwrite(off_t a, char *buf, size_t len, CLIENT *client, int fua) { char pagebuf[DIFFPAGESIZE]; off_t mapcnt,mapl,maph; off_t wrlen,rdlen; off_t pagestart; off_t offset; if (!(client->server->flags & F_COPYONWRITE)) return(rawexpwrite_fully(a, buf, len, client, fua)); DEBUG("Asked to write %u bytes at %llu.\n", (unsigned int)len, (unsigned long long)a); mapl=a/DIFFPAGESIZE ; maph=(a+len-1)/DIFFPAGESIZE ; for (mapcnt=mapl;mapcnt<=maph;mapcnt++) { pagestart=mapcnt*DIFFPAGESIZE ; offset=a-pagestart ; wrlen=(0<DIFFPAGESIZE-offset && len<(size_t)(DIFFPAGESIZE-offset)) ? 
len : (size_t)DIFFPAGESIZE-offset; if (client->difmap[mapcnt]!=(u32)(-1)) { /* the block is already there */ DEBUG("Page %llu is at %lu\n", (unsigned long long)mapcnt, (unsigned long)(client->difmap[mapcnt])) ; myseek(client->difffile, client->difmap[mapcnt]*DIFFPAGESIZE+offset); if (write(client->difffile, buf, wrlen) != wrlen) return -1 ; } else { /* the block is not there */ myseek(client->difffile,client->difffilelen*DIFFPAGESIZE) ; client->difmap[mapcnt]=(client->server->flags&F_SPARSE)?mapcnt:client->difffilelen++; DEBUG("Page %llu is not here, we put it at %lu\n", (unsigned long long)mapcnt, (unsigned long)(client->difmap[mapcnt])); rdlen=DIFFPAGESIZE ; if (rawexpread_fully(pagestart, pagebuf, rdlen, client)) return -1; memcpy(pagebuf+offset,buf,wrlen) ; if (write(client->difffile, pagebuf, DIFFPAGESIZE) != DIFFPAGESIZE) return -1; } len-=wrlen ; a+=wrlen ; buf+=wrlen ; } if (client->server->flags & F_SYNC) { fsync(client->difffile); } else if (fua) { /* open question: would it be cheaper to do multiple sync_file_ranges? as we iterate through the above? */ fdatasync(client->difffile); } return 0; } /** * Flush data to a client * * @param client The client we're going to write for. * @return 0 on success, nonzero on failure **/ int expflush(CLIENT *client) { gint i; if (client->server->flags & F_COPYONWRITE) { return fsync(client->difffile); } for (i = 0; i < client->export->len; i++) { FILE_INFO fi = g_array_index(client->export, FILE_INFO, i); if (fsync(fi.fhandle) < 0) return -1; } return 0; } /* * If the current system supports it, call fallocate() on the backend * file to resparsify stuff that isn't needed anymore (see NBD_CMD_TRIM) */ int exptrim(struct nbd_request* req, CLIENT* client) { #if HAVE_FALLOC_PH FILE_INFO prev = g_array_index(client->export, FILE_INFO, 0); FILE_INFO cur = prev; int i = 1; /* We're running on a system that supports the * FALLOC_FL_PUNCH_HOLE option to re-sparsify a file */ do { if(i<client->export->len) { cur = g_array_index(client->export, FILE_INFO, i); } if(prev.startoff <= req->from) { off_t curoff = req->from - prev.startoff; off_t curlen = cur.startoff - prev.startoff - curoff; fallocate(prev.fhandle, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, curoff, curlen); } prev = cur; } while(i < client->export->len && cur.startoff < (req->from + req->len)); DEBUG("Performed TRIM request from %llu to %llu", (unsigned long long) req->from, (unsigned long long) req->len); #else DEBUG("Ignoring TRIM request (not supported on current platform"); #endif return 0; } static void send_reply(uint32_t opt, int net, uint32_t reply_type, size_t datasize, void* data) { uint64_t magic = htonll(0x3e889045565a9LL); reply_type = htonl(reply_type); uint32_t datsize = htonl(datasize); struct iovec v_data[] = { { &magic, sizeof(magic) }, { &opt, sizeof(opt) }, { &reply_type, sizeof(reply_type) }, { &datsize, sizeof(datsize) }, { data, datasize }, }; writev(net, v_data, 5); } static CLIENT* handle_export_name(uint32_t opt, int net, GArray* servers, uint32_t cflags) { uint32_t namelen; char* name; int i; if (read(net, &namelen, sizeof(namelen)) < 0) { err("Negotiation failed/7: %m"); return NULL; } namelen = ntohl(namelen); name = malloc(namelen+1); name[namelen]=0; if (read(net, name, namelen) < 0) { err("Negotiation failed/8: %m"); free(name); return NULL; } for(i=0; i<servers->len; i++) { SERVER* serve = &(g_array_index(servers, SERVER, i)); if(!strcmp(serve->servename, name)) { CLIENT* client = g_new0(CLIENT, 1); client->server = serve; client->exportsize = OFFT_MAX; 
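/* Note: OFFT_MAX is only a placeholder here; the real export size is
 * computed later by setupexport() (and possibly capped to the configured
 * expected_size) once serveconnection() has opened the export file(s). */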
client->net = net; client->modern = TRUE; client->transactionlogfd = -1; client->clientfeats = cflags; free(name); return client; } } err("Negotiation failed/8a: Requested export not found"); free(name); return NULL; } static void handle_list(uint32_t opt, int net, GArray* servers, uint32_t cflags) { uint32_t len; int i; char buf[1024]; char *ptr = buf + sizeof(len); if (read(net, &len, sizeof(len)) < 0) err("Negotiation failed/8: %m"); len = ntohl(len); if(len) { send_reply(opt, net, NBD_REP_ERR_INVALID, 0, NULL); } if(!(glob_flags & F_LIST)) { send_reply(opt, net, NBD_REP_ERR_POLICY, 0, NULL); err_nonfatal("Client tried disallowed list option"); return; } for(i=0; i<servers->len; i++) { SERVER* serve = &(g_array_index(servers, SERVER, i)); len = htonl(strlen(serve->servename)); memcpy(buf, &len, sizeof(len)); strcpy(ptr, serve->servename); send_reply(opt, net, NBD_REP_SERVER, strlen(serve->servename)+sizeof(len), buf); } send_reply(opt, net, NBD_REP_ACK, 0, NULL); } /** * Do the initial negotiation. * * @param client The client we're negotiating with. **/ CLIENT* negotiate(int net, CLIENT *client, GArray* servers, int phase) { char zeros[128]; uint64_t size_host; uint32_t flags = NBD_FLAG_HAS_FLAGS; uint16_t smallflags = 0; uint64_t magic; memset(zeros, '\0', sizeof(zeros)); assert(((phase & NEG_INIT) && (phase & NEG_MODERN)) || client); if(phase & NEG_MODERN) { smallflags |= NBD_FLAG_FIXED_NEWSTYLE; } if(phase & NEG_INIT) { /* common */ if (write(net, INIT_PASSWD, 8) < 0) { err_nonfatal("Negotiation failed/1: %m"); if(client) exit(EXIT_FAILURE); } if(phase & NEG_MODERN) { /* modern */ magic = htonll(opts_magic); } else { /* oldstyle */ magic = htonll(cliserv_magic); } if (write(net, &magic, sizeof(magic)) < 0) { err_nonfatal("Negotiation failed/2: %m"); if(phase & NEG_OLD) exit(EXIT_FAILURE); } } if ((phase & NEG_MODERN) && (phase & NEG_INIT)) { /* modern */ uint32_t cflags; uint32_t opt; if(!servers) err("programmer error"); smallflags = htons(smallflags); if (write(net, &smallflags, sizeof(uint16_t)) < 0) err_nonfatal("Negotiation failed/3: %m"); if (read(net, &cflags, sizeof(cflags)) < 0) err_nonfatal("Negotiation failed/4: %m"); cflags = htonl(cflags); do { if (read(net, &magic, sizeof(magic)) < 0) err_nonfatal("Negotiation failed/5: %m"); magic = ntohll(magic); if(magic != opts_magic) { err_nonfatal("Negotiation failed/5a: magic mismatch"); return NULL; } if (read(net, &opt, sizeof(opt)) < 0) err_nonfatal("Negotiation failed/6: %m"); opt = ntohl(opt); switch(opt) { case NBD_OPT_EXPORT_NAME: // NBD_OPT_EXPORT_NAME must be the last // selected option, so return from here // if that is chosen. 
return handle_export_name(opt, net, servers, cflags); break; case NBD_OPT_LIST: handle_list(opt, net, servers, cflags); break; case NBD_OPT_ABORT: // handled below break; default: send_reply(opt, net, NBD_REP_ERR_UNSUP, 0, NULL); break; } } while((opt != NBD_OPT_EXPORT_NAME) && (opt != NBD_OPT_ABORT)); if(opt == NBD_OPT_ABORT) { err_nonfatal("Session terminated by client"); return NULL; } } /* common */ size_host = htonll((u64)(client->exportsize)); if (write(net, &size_host, 8) < 0) err("Negotiation failed/9: %m"); if (client->server->flags & F_READONLY) flags |= NBD_FLAG_READ_ONLY; if (client->server->flags & F_FLUSH) flags |= NBD_FLAG_SEND_FLUSH; if (client->server->flags & F_FUA) flags |= NBD_FLAG_SEND_FUA; if (client->server->flags & F_ROTATIONAL) flags |= NBD_FLAG_ROTATIONAL; if (client->server->flags & F_TRIM) flags |= NBD_FLAG_SEND_TRIM; if (phase & NEG_OLD) { /* oldstyle */ flags = htonl(flags); if (write(client->net, &flags, 4) < 0) err("Negotiation failed/10: %m"); } else { /* modern */ smallflags = (uint16_t)(flags & ~((uint16_t)0)); smallflags = htons(smallflags); if (write(client->net, &smallflags, sizeof(smallflags)) < 0) { err("Negotiation failed/11: %m"); } } /* common */ if (write(client->net, zeros, 124) < 0) err("Negotiation failed/12: %m"); return NULL; } /** sending macro. */ #define SEND(net,reply) { writeit( net, &reply, sizeof( reply )); \ if (client->transactionlogfd != -1) \ writeit(client->transactionlogfd, &reply, sizeof(reply)); } /** error macro. */ #define ERROR(client,reply,errcode) { reply.error = htonl(errcode); SEND(client->net,reply); reply.error = 0; } /** * Serve a file to a single client. * * @todo This beast needs to be split up in many tiny little manageable * pieces. Preferably with a chainsaw. * * @param client The client we're going to serve to. * @return when the client disconnects **/ int mainloop(CLIENT *client) { struct nbd_request request; struct nbd_reply reply; gboolean go_on=TRUE; #ifdef DODBG int i = 0; #endif negotiate(client->net, client, NULL, client->modern ? NEG_MODERN : (NEG_OLD | NEG_INIT)); DEBUG("Entering request loop!\n"); reply.magic = htonl(NBD_REPLY_MAGIC); reply.error = 0; while (go_on) { char buf[BUFSIZE]; char* p; size_t len; size_t currlen; size_t writelen; uint16_t command; #ifdef DODBG i++; printf("%d: ", i); #endif readit(client->net, &request, sizeof(request)); if (client->transactionlogfd != -1) writeit(client->transactionlogfd, &request, sizeof(request)); request.from = ntohll(request.from); request.type = ntohl(request.type); command = request.type & NBD_CMD_MASK_COMMAND; len = ntohl(request.len); DEBUG("%s from %llu (%llu) len %u, ", getcommandname(command), (unsigned long long)request.from, (unsigned long long)request.from / 512, len); if (request.magic != htonl(NBD_REQUEST_MAGIC)) err("Not enough magic."); memcpy(reply.handle, request.handle, sizeof(reply.handle)); if ((command==NBD_CMD_WRITE) || (command==NBD_CMD_READ)) { if (request.from + len < request.from) { // 64 bit overflow!! 
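/* The unsigned addition wrapped around, so the requested range can
 * never fit inside the export; reject it with EINVAL below. */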
DEBUG("[Number too large!]"); ERROR(client, reply, EINVAL); continue; } if (((off_t)request.from + len) > client->exportsize) { DEBUG("[RANGE!]"); ERROR(client, reply, EINVAL); continue; } currlen = len; if (currlen > BUFSIZE - sizeof(struct nbd_reply)) { currlen = BUFSIZE - sizeof(struct nbd_reply); if(!logged_oversized) { msg(LOG_DEBUG, "oversized request (this is not a problem)"); logged_oversized = true; } } } switch (command) { case NBD_CMD_DISC: msg(LOG_INFO, "Disconnect request received."); if (client->server->flags & F_COPYONWRITE) { if (client->difmap) g_free(client->difmap) ; close(client->difffile); unlink(client->difffilename); free(client->difffilename); } go_on=FALSE; continue; case NBD_CMD_WRITE: DEBUG("wr: net->buf, "); while(len > 0) { readit(client->net, buf, currlen); DEBUG("buf->exp, "); if ((client->server->flags & F_READONLY) || (client->server->flags & F_AUTOREADONLY)) { DEBUG("[WRITE to READONLY!]"); ERROR(client, reply, EPERM); consume(client->net, buf, len-currlen, BUFSIZE); continue; } if (expwrite(request.from, buf, currlen, client, request.type & NBD_CMD_FLAG_FUA)) { DEBUG("Write failed: %m" ); ERROR(client, reply, errno); consume(client->net, buf, len-currlen, BUFSIZE); continue; } len -= currlen; request.from += currlen; currlen = (len < BUFSIZE) ? len : BUFSIZE; } SEND(client->net, reply); DEBUG("OK!\n"); continue; case NBD_CMD_FLUSH: DEBUG("fl: "); if (expflush(client)) { DEBUG("Flush failed: %m"); ERROR(client, reply, errno); continue; } SEND(client->net, reply); DEBUG("OK!\n"); continue; case NBD_CMD_READ: DEBUG("exp->buf, "); if (client->transactionlogfd != -1) writeit(client->transactionlogfd, &reply, sizeof(reply)); writeit(client->net, &reply, sizeof(reply)); p = buf; writelen = currlen; while(len > 0) { if (expread(request.from, p, currlen, client)) { DEBUG("Read failed: %m"); ERROR(client, reply, errno); continue; } DEBUG("buf->net, "); writeit(client->net, buf, writelen); len -= currlen; request.from += currlen; currlen = (len < BUFSIZE) ? len : BUFSIZE; p = buf; writelen = currlen; } DEBUG("OK!\n"); continue; case NBD_CMD_TRIM: /* The kernel module sets discard_zeroes_data == 0, * so it is okay to do nothing. */ if (exptrim(&request, client)) { DEBUG("Trim failed: %m"); ERROR(client, reply, errno); continue; } SEND(client->net, reply); continue; default: DEBUG ("Ignoring unknown command\n"); continue; } } return 0; } /** * Set up client export array, which is an array of FILE_INFO. * Also, split a single exportfile into multiple ones, if that was asked. * @param client information on the client which we want to setup export for **/ void setupexport(CLIENT* client) { int i; off_t laststartoff = 0, lastsize = 0; int multifile = (client->server->flags & F_MULTIFILE); int temporary = (client->server->flags & F_TEMPORARY) && !multifile; int cancreate = (client->server->expected_size) && !multifile; client->export = g_array_new(TRUE, TRUE, sizeof(FILE_INFO)); /* If multi-file, open as many files as we can. * If not, open exactly one file. * Calculate file sizes as we go to get total size. */ for(i=0; ; i++) { FILE_INFO fi; gchar *tmpname; gchar* error_string; if (i) cancreate = 0; /* if expected_size is specified, and this is the first file, we can create the file */ mode_t mode = (client->server->flags & F_READONLY) ? 
O_RDONLY : (O_RDWR | (cancreate?O_CREAT:0)); if (temporary) { tmpname=g_strdup_printf("%s.%d-XXXXXX", client->exportname, i); DEBUG( "Opening %s\n", tmpname ); fi.fhandle = mkstemp(tmpname); } else { if(multifile) { tmpname=g_strdup_printf("%s.%d", client->exportname, i); } else { tmpname=g_strdup(client->exportname); } DEBUG( "Opening %s\n", tmpname ); fi.fhandle = open(tmpname, mode, 0x600); if(fi.fhandle == -1 && mode == O_RDWR) { /* Try again because maybe media was read-only */ fi.fhandle = open(tmpname, O_RDONLY); if(fi.fhandle != -1) { /* Opening the base file in copyonwrite mode is * okay */ if(!(client->server->flags & F_COPYONWRITE)) { client->server->flags |= F_AUTOREADONLY; client->server->flags |= F_READONLY; } } } } if(fi.fhandle == -1) { if(multifile && i>0) break; error_string=g_strdup_printf( "Could not open exported file %s: %%m", tmpname); err(error_string); } if (temporary) unlink(tmpname); /* File will stick around whilst FD open */ fi.startoff = laststartoff + lastsize; g_array_append_val(client->export, fi); g_free(tmpname); /* Starting offset and size of this file will be used to * calculate starting offset of next file */ laststartoff = fi.startoff; lastsize = size_autodetect(fi.fhandle); /* If we created the file, it will be length zero */ if (!lastsize && cancreate) { assert(!multifile); if(ftruncate (fi.fhandle, client->server->expected_size)<0) { err("Could not expand file: %m"); } lastsize = client->server->expected_size; break; /* don't look for any more files */ } if(!multifile || temporary) break; } /* Set export size to total calculated size */ client->exportsize = laststartoff + lastsize; /* Export size may be overridden */ if(client->server->expected_size) { /* desired size must be <= total calculated size */ if(client->server->expected_size > client->exportsize) { err("Size of exported file is too big\n"); } client->exportsize = client->server->expected_size; } msg(LOG_INFO, "Size of exported file/device is %llu", (unsigned long long)client->exportsize); if(multifile) { msg(LOG_INFO, "Total number of files: %d", i); } } int copyonwrite_prepare(CLIENT* client) { off_t i; if ((client->difffilename = malloc(1024))==NULL) err("Failed to allocate string for diff file name"); snprintf(client->difffilename, 1024, "%s-%s-%d.diff",client->exportname,client->clientname, (int)getpid()) ; client->difffilename[1023]='\0'; msg(LOG_INFO, "About to create map and diff file %s", client->difffilename) ; client->difffile=open(client->difffilename,O_RDWR | O_CREAT | O_TRUNC,0600) ; if (client->difffile<0) err("Could not create diff file (%m)") ; if ((client->difmap=calloc(client->exportsize/DIFFPAGESIZE,sizeof(u32)))==NULL) err("Could not allocate memory") ; for (i=0;i<client->exportsize/DIFFPAGESIZE;i++) client->difmap[i]=(u32)-1 ; return 0; } /** * Run a command. This is used for the ``prerun'' and ``postrun'' config file * options * * @param command the command to be ran. Read from the config file * @param file the file name we're about to export **/ int do_run(gchar* command, gchar* file) { gchar* cmd; int retval=0; if(command && *command) { cmd = g_strdup_printf(command, file); retval=system(cmd); g_free(cmd); } return retval; } /** * Serve a connection. * * @todo allow for multithreading, perhaps use libevent. Not just yet, though; * follow the road map. 
* * @param client a connected client **/ void serveconnection(CLIENT *client) { if (client->server->transactionlog && (client->transactionlogfd == -1)) { if (-1 == (client->transactionlogfd = open(client->server->transactionlog, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR))) g_warning("Could not open transaction log %s", client->server->transactionlog); } if(do_run(client->server->prerun, client->exportname)) { exit(EXIT_FAILURE); } setupexport(client); if (client->server->flags & F_COPYONWRITE) { copyonwrite_prepare(client); } setmysockopt(client->net); mainloop(client); do_run(client->server->postrun, client->exportname); if (-1 != client->transactionlogfd) { close(client->transactionlogfd); client->transactionlogfd = -1; } } /** * Find the name of the file we have to serve. This will use g_strdup_printf * to put the IP address of the client inside a filename containing * "%s" (in the form as specified by the "virtstyle" option). That name * is then written to client->exportname. * * @param net A socket connected to an nbd client * @param client information about the client. The IP address in human-readable * format will be written to a new char* buffer, the address of which will be * stored in client->clientname. * @return: 0 - OK, -1 - failed. **/ int set_peername(int net, CLIENT *client) { struct sockaddr_storage addrin; struct sockaddr_storage netaddr; struct sockaddr_in *netaddr4 = NULL; struct sockaddr_in6 *netaddr6 = NULL; socklen_t addrinlen = sizeof( addrin ); struct addrinfo hints; struct addrinfo *ai = NULL; char peername[NI_MAXHOST]; char netname[NI_MAXHOST]; char *tmp = NULL; int i; int e; int shift; if (getpeername(net, (struct sockaddr *) &addrin, &addrinlen) < 0) { msg(LOG_INFO, "getpeername failed: %m"); return -1; } if((e = getnameinfo((struct sockaddr *)&addrin, addrinlen, peername, sizeof (peername), NULL, 0, NI_NUMERICHOST))) { msg(LOG_INFO, "getnameinfo failed: %s", gai_strerror(e)); return -1; } memset(&hints, '\0', sizeof (hints)); hints.ai_flags = AI_ADDRCONFIG; e = getaddrinfo(peername, NULL, &hints, &ai); if(e != 0) { msg(LOG_INFO, "getaddrinfo failed: %s", gai_strerror(e)); freeaddrinfo(ai); return -1; } switch(client->server->virtstyle) { case VIRT_NONE: msg(LOG_DEBUG, "virtualization is off"); client->exportname=g_strdup(client->server->exportname); break; case VIRT_IPHASH: msg(LOG_DEBUG, "virtstyle iphash"); for(i=0;i<strlen(peername);i++) { if(peername[i]=='.') { peername[i]='/'; } } case VIRT_IPLIT: msg(LOG_DEBUG, "virststyle ipliteral"); client->exportname=g_strdup_printf(client->server->exportname, peername); break; case VIRT_CIDR: msg(LOG_DEBUG, "virtstyle cidr %d", client->server->cidrlen); memcpy(&netaddr, &addrin, addrinlen); if(ai->ai_family == AF_INET) { netaddr4 = (struct sockaddr_in *)&netaddr; (netaddr4->sin_addr).s_addr>>=32-(client->server->cidrlen); (netaddr4->sin_addr).s_addr<<=32-(client->server->cidrlen); getnameinfo((struct sockaddr *) netaddr4, addrinlen, netname, sizeof (netname), NULL, 0, NI_NUMERICHOST); tmp=g_strdup_printf("%s/%s", netname, peername); }else if(ai->ai_family == AF_INET6) { netaddr6 = (struct sockaddr_in6 *)&netaddr; shift = 128-(client->server->cidrlen); i = 3; while(shift >= 8) { ((netaddr6->sin6_addr).s6_addr[i])=0; shift-=8; i--; } (netaddr6->sin6_addr).s6_addr[i]>>=shift; (netaddr6->sin6_addr).s6_addr[i]<<=shift; getnameinfo((struct sockaddr *)netaddr6, addrinlen, netname, sizeof(netname), NULL, 0, NI_NUMERICHOST); tmp=g_strdup_printf("%s/%s", netname, peername); } if(tmp != NULL) 
client->exportname=g_strdup_printf(client->server->exportname, tmp); break; } freeaddrinfo(ai); msg(LOG_INFO, "connect from %s, assigned file is %s", peername, client->exportname); client->clientname=g_strdup(peername); return 0; } /** * Destroy a pid_t* * @param data a pointer to pid_t which should be freed **/ void destroy_pid_t(gpointer data) { g_free(data); } static pid_t spawn_child() { pid_t pid; sigset_t newset; sigset_t oldset; sigemptyset(&newset); sigaddset(&newset, SIGCHLD); sigaddset(&newset, SIGTERM); sigprocmask(SIG_BLOCK, &newset, &oldset); pid = fork(); if (pid < 0) { msg(LOG_ERR, "Could not fork (%s)", strerror(errno)); goto out; } if (pid > 0) { /* Parent */ pid_t *pidp; pidp = g_malloc(sizeof(pid_t)); *pidp = pid; g_hash_table_insert(children, pidp, pidp); goto out; } /* Child */ signal(SIGCHLD, SIG_DFL); signal(SIGTERM, SIG_DFL); signal(SIGHUP, SIG_DFL); out: sigprocmask(SIG_SETMASK, &oldset, NULL); return pid; } static int socket_accept(const int sock) { struct sockaddr_storage addrin; socklen_t addrinlen = sizeof(addrin); int net; net = accept(sock, (struct sockaddr *) &addrin, &addrinlen); if (net < 0) { err_nonfatal("Failed to accept socket connection: %m"); } return net; } static void handle_modern_connection(GArray *const servers, const int sock) { int net; pid_t pid; CLIENT *client = NULL; int sock_flags_old; int sock_flags_new; net = socket_accept(sock); if (net < 0) return; if (!dontfork) { pid = spawn_child(); if (pid) { if (pid > 0) msg(LOG_INFO, "Spawned a child process"); if (pid < 0) msg(LOG_ERR, "Failed to spawn a child process"); close(net); return; } /* Child just continues. */ } client = negotiate(net, NULL, servers, NEG_INIT | NEG_MODERN); if (!client) { msg(LOG_ERR, "Modern initial negotiation failed"); goto handler_err; } if (client->server->max_connections > 0 && g_hash_table_size(children) >= client->server->max_connections) { msg(LOG_ERR, "Max connections (%d) reached", client->server->max_connections); goto handler_err; } sock_flags_old = fcntl(net, F_GETFL, 0); if (sock_flags_old == -1) { msg(LOG_ERR, "Failed to get socket flags"); goto handler_err; } sock_flags_new = sock_flags_old & ~O_NONBLOCK; if (sock_flags_new != sock_flags_old && fcntl(net, F_SETFL, sock_flags_new) == -1) { msg(LOG_ERR, "Failed to set socket to blocking mode"); goto handler_err; } if (set_peername(net, client)) { msg(LOG_ERR, "Failed to set peername"); goto handler_err; } if (!authorized_client(client)) { msg(LOG_INFO, "Client '%s' is not authorized to access", client->clientname); goto handler_err; } if (!dontfork) { int i; /* Free all root server resources here, because we are * currently in the child process serving one specific * connection. These are not simply needed anymore. */ g_hash_table_destroy(children); children = NULL; for (i = 0; i < modernsocks->len; i++) { close(g_array_index(modernsocks, int, i)); } g_array_free(modernsocks, TRUE); /* Now that we are in the child process after a * succesful negotiation, we do not need the list of * servers anymore, get rid of it.*/ for (i = 0; i < servers->len; i++) { const SERVER *const server = &g_array_index(servers, SERVER, i); close(server->socket); } /* FALSE does not free the actual data. This is required, because the client has a direct reference into that data, and otherwise we get a segfault... 
*/ g_array_free(servers, FALSE); } msg(LOG_INFO, "Starting to serve"); serveconnection(client); exit(EXIT_SUCCESS); handler_err: g_free(client); close(net); if (!dontfork) { exit(EXIT_FAILURE); } } static void handle_connection(GArray *servers, int net, SERVER *serve, CLIENT *client) { int sock_flags_old; int sock_flags_new; if(serve->max_connections > 0 && g_hash_table_size(children) >= serve->max_connections) { msg(LOG_INFO, "Max connections reached"); goto handle_connection_out; } if((sock_flags_old = fcntl(net, F_GETFL, 0)) == -1) { err("fcntl F_GETFL"); } sock_flags_new = sock_flags_old & ~O_NONBLOCK; if (sock_flags_new != sock_flags_old && fcntl(net, F_SETFL, sock_flags_new) == -1) { err("fcntl F_SETFL ~O_NONBLOCK"); } if(!client) { client = g_new0(CLIENT, 1); client->server=serve; client->exportsize=OFFT_MAX; client->net=net; client->transactionlogfd = -1; } if (set_peername(net, client)) { goto handle_connection_out; } if (!authorized_client(client)) { msg(LOG_INFO, "Unauthorized client"); goto handle_connection_out; } msg(LOG_INFO, "Authorized client"); if (!dontfork) { pid_t pid; int i; sigset_t newset; sigset_t oldset; sigemptyset(&newset); sigaddset(&newset, SIGCHLD); sigaddset(&newset, SIGTERM); sigprocmask(SIG_BLOCK, &newset, &oldset); if ((pid = fork()) < 0) { msg(LOG_INFO, "Could not fork (%s)", strerror(errno)); sigprocmask(SIG_SETMASK, &oldset, NULL); goto handle_connection_out; } if (pid > 0) { /* parent */ pid_t *pidp; pidp = g_malloc(sizeof(pid_t)); *pidp = pid; g_hash_table_insert(children, pidp, pidp); sigprocmask(SIG_SETMASK, &oldset, NULL); goto handle_connection_out; } /* child */ signal(SIGCHLD, SIG_DFL); signal(SIGTERM, SIG_DFL); signal(SIGHUP, SIG_DFL); sigprocmask(SIG_SETMASK, &oldset, NULL); g_hash_table_destroy(children); children = NULL; for(i=0;i<servers->len;i++) { serve=&g_array_index(servers, SERVER, i); close(serve->socket); } /* FALSE does not free the actual data. This is required, because the client has a direct reference into that data, and otherwise we get a segfault... */ g_array_free(servers, FALSE); for(i=0;i<modernsocks->len;i++) { close(g_array_index(modernsocks, int, i)); } g_array_free(modernsocks, TRUE); } msg(LOG_INFO, "Starting to serve"); serveconnection(client); exit(EXIT_SUCCESS); handle_connection_out: g_free(client); close(net); } /** * Return the index of the server whose servename matches the given * name. * * @param servename a string to match * @param servers an array of servers * @return the first index of the server whose servename matches the * given name or -1 if one cannot be found **/ static int get_index_by_servename(const gchar *const servename, const GArray *const servers) { int i; for (i = 0; i < servers->len; ++i) { const SERVER server = g_array_index(servers, SERVER, i); if (strcmp(servename, server.servename) == 0) return i; } return -1; } int setup_serve(SERVER *const serve, GError **const gerror); /** * Parse configuration files and add servers to the array if they don't * already exist there. The existence is tested by comparing * servenames. A server is appended to the array only if its servename * is unique among all other servers. 
* * @param servers an array of servers * @return the number of new servers appended to the array, or -1 in * case of an error **/ static int append_new_servers(GArray *const servers, GError **const gerror) { int i; GArray *new_servers; const int old_len = servers->len; int retval = -1; struct generic_conf genconf; new_servers = parse_cfile(config_file_pos, &genconf, gerror); if (!new_servers) goto out; for (i = 0; i < new_servers->len; ++i) { SERVER new_server = g_array_index(new_servers, SERVER, i); if (new_server.servename && -1 == get_index_by_servename(new_server.servename, servers)) { if (setup_serve(&new_server, gerror) == -1) goto out; if (append_serve(&new_server, servers) == -1) goto out; } } retval = servers->len - old_len; out: g_array_free(new_servers, TRUE); return retval; } /** * Loop through the available servers, and serve them. Never returns. **/ void serveloop(GArray* servers) { struct sockaddr_storage addrin; socklen_t addrinlen=sizeof(addrin); int i; int max; fd_set mset; fd_set rset; /* * Set up the master fd_set. The set of descriptors we need * to select() for never changes anyway and it buys us a *lot* * of time to only build this once. However, if we ever choose * to not fork() for clients anymore, we may have to revisit * this. */ max=0; FD_ZERO(&mset); for(i=0;i<servers->len;i++) { int sock; if((sock=(g_array_index(servers, SERVER, i)).socket) >= 0) { FD_SET(sock, &mset); max=sock>max?sock:max; } } for(i=0;i<modernsocks->len;i++) { int sock = g_array_index(modernsocks, int, i); FD_SET(sock, &mset); max=sock>max?sock:max; } for(;;) { /* SIGHUP causes the root server process to reconfigure * itself and add new export servers for each newly * found export configuration group, i.e. spawn new * server processes for each previously non-existent * export. This does not alter old runtime configuration * but just appends new exports. */ if (is_sighup_caught) { int n; GError *gerror = NULL; msg(LOG_INFO, "reconfiguration request received"); is_sighup_caught = 0; /* Reset to allow catching * it again. */ n = append_new_servers(servers, &gerror); if (n == -1) msg(LOG_ERR, "failed to append new servers: %s", gerror->message); for (i = servers->len - n; i < servers->len; ++i) { const SERVER server = g_array_index(servers, SERVER, i); if (server.socket >= 0) { FD_SET(server.socket, &mset); max = server.socket > max ? server.socket : max; } msg(LOG_INFO, "reconfigured new server: %s", server.servename); } } memcpy(&rset, &mset, sizeof(fd_set)); if(select(max+1, &rset, NULL, NULL, NULL)>0) { DEBUG("accept, "); for(i=0; i < modernsocks->len; i++) { int sock = g_array_index(modernsocks, int, i); if(!FD_ISSET(sock, &rset)) { continue; } handle_modern_connection(servers, sock); } for(i=0; i < servers->len; i++) { int net; SERVER *serve; serve=&(g_array_index(servers, SERVER, i)); if(serve->socket < 0) { continue; } if(FD_ISSET(serve->socket, &rset)) { if ((net=accept(serve->socket, (struct sockaddr *) &addrin, &addrinlen)) < 0) { err_nonfatal("accept: %m"); continue; } handle_connection(servers, net, serve, NULL); } } } } } void serveloop(GArray* servers) G_GNUC_NORETURN; /** * Set server socket options. * * @param socket a socket descriptor of the server * * @param gerror a pointer to an error object pointer used for reporting * errors. On error, if gerror is not NULL, *gerror is set and -1 * is returned. 
* * @return 0 on success, -1 on error **/ int dosockopts(const int socket, GError **const gerror) { #ifndef sun int yes=1; #else char yes='1'; #endif /* sun */ struct linger l; /* lose the pesky "Address already in use" error message */ if (setsockopt(socket,SOL_SOCKET,SO_REUSEADDR,&yes,sizeof(int)) == -1) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SO_REUSEADDR, "failed to set socket option SO_REUSEADDR: %s", strerror(errno)); return -1; } l.l_onoff = 1; l.l_linger = 10; if (setsockopt(socket,SOL_SOCKET,SO_LINGER,&l,sizeof(l)) == -1) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SO_LINGER, "failed to set socket option SO_LINGER: %s", strerror(errno)); return -1; } if (setsockopt(socket,SOL_SOCKET,SO_KEEPALIVE,&yes,sizeof(int)) == -1) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SO_KEEPALIVE, "failed to set socket option SO_KEEPALIVE: %s", strerror(errno)); return -1; } return 0; } /** * Connect a server's socket. * * @param serve the server we want to connect. **/ int setup_serve(SERVER *const serve, GError **const gerror) { struct addrinfo hints; struct addrinfo *ai = NULL; gchar *port = NULL; int e; int retval = -1; /* Without this, it's possible that socket == 0, even if it's * not initialized at all. And that would be wrong because 0 is * totally legal value for properly initialized descriptor. This * line is required to ensure that unused/uninitialized * descriptors are marked as such (new style configuration * case). Currently, servers are being initialized in multiple * places, and some of the them do the socket initialization * incorrectly. This is the only point common to all code paths, * and therefore setting -1 is put here. However, the whole * server initialization procedure should be extracted to its * own function and all code paths wanting to mess with servers * should initialize servers with that function. * * TODO: fix server initialization */ serve->socket = -1; if(!(glob_flags & F_OLDSTYLE)) { return serve->servename ? 1 : 0; } memset(&hints,'\0',sizeof(hints)); hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG | AI_NUMERICSERV; hints.ai_socktype = SOCK_STREAM; hints.ai_family = serve->socket_family; port = g_strdup_printf("%d", serve->port); if (!port) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SYS, "failed to open an export socket: " "failed to convert a port number to a string: %s", strerror(errno)); goto out; } e = getaddrinfo(serve->listenaddr,port,&hints,&ai); g_free(port); if(e != 0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_GAI, "failed to open an export socket: " "failed to get address info: %s", gai_strerror(e)); goto out; } if(serve->socket_family == AF_UNSPEC) serve->socket_family = ai->ai_family; #ifdef WITH_SDP if ((serve->flags) && F_SDP) { if (ai->ai_family == AF_INET) ai->ai_family = AF_INET_SDP; else (ai->ai_family == AF_INET6) ai->ai_family = AF_INET6_SDP; } #endif if ((serve->socket = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) < 0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SOCKET, "failed to open an export socket: " "failed to create a socket: %s", strerror(errno)); goto out; } if (dosockopts(serve->socket, gerror) == -1) { g_prefix_error(gerror, "failed to open an export socket: "); goto out; } DEBUG("Waiting for connections... 
bind, "); e = bind(serve->socket, ai->ai_addr, ai->ai_addrlen); if (e != 0 && errno != EADDRINUSE) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_BIND, "failed to open an export socket: " "failed to bind an address to a socket: %s", strerror(errno)); goto out; } DEBUG("listen, "); if (listen(serve->socket, 1) < 0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_BIND, "failed to open an export socket: " "failed to start listening on a socket: %s", strerror(errno)); goto out; } retval = serve->servename ? 1 : 0; out: if (retval == -1 && serve->socket >= 0) { close(serve->socket); serve->socket = -1; } freeaddrinfo (ai); return retval; } int open_modern(const gchar *const addr, const gchar *const port, GError **const gerror) { struct addrinfo hints; struct addrinfo* ai = NULL; struct addrinfo* ai_bak; struct sock_flags; int e; int retval = -1; int i=0; int sock = -1; memset(&hints, '\0', sizeof(hints)); hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG; hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; hints.ai_protocol = IPPROTO_TCP; e = getaddrinfo(addr, port ? port : NBD_DEFAULT_PORT, &hints, &ai); ai_bak = ai; if(e != 0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_GAI, "failed to open a modern socket: " "failed to get address info: %s", gai_strerror(e)); goto out; } while(ai != NULL) { sock = -1; if((sock = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol))<0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_SOCKET, "failed to open a modern socket: " "failed to create a socket: %s", strerror(errno)); goto out; } if (dosockopts(sock, gerror) == -1) { g_prefix_error(gerror, "failed to open a modern socket: "); goto out; } if(bind(sock, ai->ai_addr, ai->ai_addrlen)) { /* This is so wrong. * * Linux will return multiple entries for the * same system when we ask it for something * AF_UNSPEC, even though the first entry will * listen to both protocols. Other systems will * return multiple entries too, but we actually * do need to open both. Sigh. * * Handle it by ignoring EADDRINUSE if we've * already got at least one socket open */ if(errno == EADDRINUSE && modernsocks->len > 0) { goto next; } g_set_error(gerror, NBDS_ERR, NBDS_ERR_BIND, "failed to open a modern socket: " "failed to bind an address to a socket: %s", strerror(errno)); goto out; } if(listen(sock, 10) <0) { g_set_error(gerror, NBDS_ERR, NBDS_ERR_BIND, "failed to open a modern socket: " "failed to start listening on a socket: %s", strerror(errno)); goto out; } g_array_append_val(modernsocks, sock); next: ai = ai->ai_next; } retval = 0; out: if (retval == -1 && sock >= 0) { close(sock); } if(ai_bak) freeaddrinfo(ai_bak); return retval; } /** * Connect our servers. 
**/ void setup_servers(GArray *const servers, const gchar *const modernaddr, const gchar *const modernport) { int i; struct sigaction sa; int want_modern=0; for(i=0;i<servers->len;i++) { GError *gerror = NULL; SERVER *server = &g_array_index(servers, SERVER, i); int ret; ret = setup_serve(server, &gerror); if (ret == -1) { msg(LOG_ERR, "failed to setup servers: %s", gerror->message); g_clear_error(&gerror); exit(EXIT_FAILURE); } want_modern |= ret; } if(want_modern) { GError *gerror = NULL; if (open_modern(modernaddr, modernport, &gerror) == -1) { msg(LOG_ERR, "failed to setup servers: %s", gerror->message); g_clear_error(&gerror); exit(EXIT_FAILURE); } } children=g_hash_table_new_full(g_int_hash, g_int_equal, NULL, destroy_pid_t); sa.sa_handler = sigchld_handler; sigemptyset(&sa.sa_mask); sigaddset(&sa.sa_mask, SIGTERM); sa.sa_flags = SA_RESTART; if(sigaction(SIGCHLD, &sa, NULL) == -1) err("sigaction: %m"); sa.sa_handler = sigterm_handler; sigemptyset(&sa.sa_mask); sigaddset(&sa.sa_mask, SIGCHLD); sa.sa_flags = SA_RESTART; if(sigaction(SIGTERM, &sa, NULL) == -1) err("sigaction: %m"); sa.sa_handler = sighup_handler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_RESTART; if(sigaction(SIGHUP, &sa, NULL) == -1) err("sigaction: %m"); } /** * Go daemon (unless we specified at compile time that we didn't want this) * @param serve the first server of our configuration. If its port is zero, * then do not daemonize, because we're doing inetd then. This parameter * is only used to create a PID file of the form * /var/run/nbd-server.&lt;port&gt;.pid; it's not modified in any way. **/ #if !defined(NODAEMON) void daemonize(SERVER* serve) { FILE*pidf; if(serve && !(serve->port)) { return; } if(daemon(0,0)<0) { err("daemon"); } if(!*pidftemplate) { if(serve) { strncpy(pidftemplate, "/var/run/nbd-server.%d.pid", 255); } else { strncpy(pidftemplate, "/var/run/nbd-server.pid", 255); } } snprintf(pidfname, 255, pidftemplate, serve ? serve->port : 0); pidf=fopen(pidfname, "w"); if(pidf) { fprintf(pidf,"%d\n", (int)getpid()); fclose(pidf); } else { perror("fopen"); fprintf(stderr, "Not fatal; continuing"); } } #else #define daemonize(serve) #endif /* !defined(NODAEMON) */ /* * Everything beyond this point (in the file) is run in non-daemon mode. * The stuff above daemonize() isn't. */ /** * Set up user-ID and/or group-ID **/ void dousers(const gchar *const username, const gchar *const groupname) { struct passwd *pw; struct group *gr; gchar* str; if (groupname) { gr = getgrnam(groupname); if(!gr) { str = g_strdup_printf("Invalid group name: %s", groupname); err(str); } if(setgid(gr->gr_gid)<0) { err("Could not set GID: %m"); } } if (username) { pw = getpwnam(username); if(!pw) { str = g_strdup_printf("Invalid user name: %s", username); err(str); } if(setuid(pw->pw_uid)<0) { err("Could not set UID: %m"); } } } #ifndef ISSERVER void glib_message_syslog_redirect(const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { int level=LOG_DEBUG; switch( log_level ) { case G_LOG_FLAG_FATAL: case G_LOG_LEVEL_CRITICAL: case G_LOG_LEVEL_ERROR: level=LOG_ERR; break; case G_LOG_LEVEL_WARNING: level=LOG_WARNING; break; case G_LOG_LEVEL_MESSAGE: case G_LOG_LEVEL_INFO: level=LOG_INFO; break; case G_LOG_LEVEL_DEBUG: level=LOG_DEBUG; break; default: level=LOG_ERR; } syslog(level, "%s", message); } #endif /** * Main entry point... 
**/ int main(int argc, char *argv[]) { SERVER *serve; GArray *servers; GError *err=NULL; struct generic_conf genconf; memset(&genconf, 0, sizeof(struct generic_conf)); if (sizeof( struct nbd_request )!=28) { fprintf(stderr,"Bad size of structure. Alignment problems?\n"); exit(EXIT_FAILURE) ; } memset(pidftemplate, '\0', 256); modernsocks = g_array_new(FALSE, FALSE, sizeof(int)); logging(); config_file_pos = g_strdup(CFILE); serve=cmdline(argc, argv); servers = parse_cfile(config_file_pos, &genconf, &err); /* Update global variables with parsed values. This will be * removed once we get rid of global configuration variables. */ glob_flags |= genconf.flags; if(serve) { serve->socket_family = AF_UNSPEC; append_serve(serve, servers); if (!(serve->port)) { CLIENT *client; #ifndef ISSERVER /* You really should define ISSERVER if you're going to use * inetd mode, but if you don't, closing stdout and stderr * (which inetd had connected to the client socket) will let it * work. */ close(1); close(2); open("/dev/null", O_WRONLY); open("/dev/null", O_WRONLY); g_log_set_default_handler( glib_message_syslog_redirect, NULL ); #endif client=g_malloc(sizeof(CLIENT)); client->server=serve; client->net=-1; client->exportsize=OFFT_MAX; if (set_peername(0, client)) exit(EXIT_FAILURE); serveconnection(client); return 0; } } if(!servers || !servers->len) { if(err && !(err->domain == NBDS_ERR && err->code == NBDS_ERR_CFILE_NOTFOUND)) { g_warning("Could not parse config file: %s", err ? err->message : "Unknown error"); } } if(serve) { g_warning("Specifying an export on the command line is deprecated."); g_warning("Please use a configuration file instead."); } if((!serve) && (!servers||!servers->len)) { if(err) g_message("No configured exports; quitting."); exit(EXIT_FAILURE); } if (!dontfork) daemonize(serve); setup_servers(servers, genconf.modernaddr, genconf.modernport); dousers(genconf.user, genconf.group); serveloop(servers); }
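/*
 * A minimal, purely illustrative configuration file sketch that
 * parse_cfile() above would accept. The key names are taken from the
 * gp[] and lp[] parameter tables in parse_cfile(); every value shown is
 * an invented example, not a recommended setting.
 *
 *   [generic]
 *       user = nbd
 *       group = nbd
 *       listenaddr = 0.0.0.0
 *       allowlist = true
 *       includedir = /etc/nbd-server/conf.d
 *
 *   [myexport]
 *       exportname = /srv/nbd/myexport.img
 *       filesize = 104857600
 *       readonly = true
 *       maxconnections = 4
 *
 * Each group other than [generic] becomes one SERVER entry (its group
 * name is the servename used during newstyle negotiation). Files ending
 * in ".conf" under "includedir" are parsed the same way by
 * do_cfile_dir(), without requiring a [generic] group.
 */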
./CrossVul/dataset_final_sorted/CWE-399/c/good_5863_0
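The daemonize() routine documented above detaches with daemon(0,0) and then records its PID in a file derived from pidftemplate (by default /var/run/nbd-server.<port>.pid), treating a failure to write that file as non-fatal. A minimal standalone version of that sequence is sketched below; the /tmp path is made up for illustration and is not the template the server actually uses.

/* daemonize-and-write-pidfile sketch following the pattern in daemonize(). */
#define _DEFAULT_SOURCE         /* for daemon(3) on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        const char *pidfile = "/tmp/example-daemon.pid";        /* hypothetical path */
        FILE *f;

        if (daemon(0, 0) < 0) {                 /* detach from the controlling terminal */
                perror("daemon");
                return EXIT_FAILURE;
        }
        f = fopen(pidfile, "w");
        if (f) {
                fprintf(f, "%d\n", (int)getpid());      /* record the daemon's PID */
                fclose(f);
        } else {
                /* As in the original: failing to write the PID file is not fatal. */
                perror("fopen");
        }
        pause();                                /* stand-in for the real serve loop */
        return 0;
}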
crossvul-cpp_data_good_1472_0
/* * CUSE: Character device in Userspace * * Copyright (C) 2008-2009 SUSE Linux Products GmbH * Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2. * * CUSE enables character devices to be implemented from userland much * like FUSE allows filesystems. On initialization /dev/cuse is * created. By opening the file and replying to the CUSE_INIT request * userland CUSE server can create a character device. After that the * operation is very similar to FUSE. * * A CUSE instance involves the following objects. * * cuse_conn : contains fuse_conn and serves as bonding structure * channel : file handle connected to the userland CUSE server * cdev : the implemented character device * dev : generic device for cdev * * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with * devices, it's called 'channel' to reduce confusion. * * channel determines when the character device dies. When channel is * closed, everything begins to destruct. The cuse_conn is taken off * the lookup table preventing further access from cdev, cdev and * generic device are removed and the base reference of cuse_conn is * put. * * On each open, the matching cuse_conn is looked up and if found an * additional reference is taken which is released when the file is * closed. */ #include <linux/fuse.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kdev_t.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/magic.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/uio.h> #include "fuse_i.h" #define CUSE_CONNTBL_LEN 64 struct cuse_conn { struct list_head list; /* linked on cuse_conntbl */ struct fuse_conn fc; /* fuse connection */ struct cdev *cdev; /* associated character device */ struct device *dev; /* device representing @cdev */ /* init parameters, set once during initialization */ bool unrestricted_ioctl; }; static DEFINE_MUTEX(cuse_lock); /* protects registration */ static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; static struct class *cuse_class; static struct cuse_conn *fc_to_cc(struct fuse_conn *fc) { return container_of(fc, struct cuse_conn, fc); } static struct list_head *cuse_conntbl_head(dev_t devt) { return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN]; } /************************************************************************** * CUSE frontend operations * * These are file operations for the character device. * * On open, CUSE opens a file from the FUSE mnt and stores it to * private_data of the open file. All other ops call FUSE ops on the * FUSE file. */ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to) { struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE); } static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp }; loff_t pos = 0; /* * No locking or generic_write_checks(), the server is * responsible for locking and sanity checks. 
*/ return fuse_direct_io(&io, from, &pos, FUSE_DIO_WRITE | FUSE_DIO_CUSE); } static int cuse_open(struct inode *inode, struct file *file) { dev_t devt = inode->i_cdev->dev; struct cuse_conn *cc = NULL, *pos; int rc; /* look up and get the connection */ mutex_lock(&cuse_lock); list_for_each_entry(pos, cuse_conntbl_head(devt), list) if (pos->dev->devt == devt) { fuse_conn_get(&pos->fc); cc = pos; break; } mutex_unlock(&cuse_lock); /* dead? */ if (!cc) return -ENODEV; /* * Generic permission check is already done against the chrdev * file, proceed to open. */ rc = fuse_do_open(&cc->fc, 0, file, 0); if (rc) fuse_conn_put(&cc->fc); return rc; } static int cuse_release(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; fuse_sync_release(ff, file->f_flags); fuse_conn_put(fc); return 0; } static long cuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = 0; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_file *ff = file->private_data; struct cuse_conn *cc = fc_to_cc(ff->fc); unsigned int flags = FUSE_IOCTL_COMPAT; if (cc->unrestricted_ioctl) flags |= FUSE_IOCTL_UNRESTRICTED; return fuse_do_ioctl(file, cmd, arg, flags); } static const struct file_operations cuse_frontend_fops = { .owner = THIS_MODULE, .read_iter = cuse_read_iter, .write_iter = cuse_write_iter, .open = cuse_open, .release = cuse_release, .unlocked_ioctl = cuse_file_ioctl, .compat_ioctl = cuse_file_compat_ioctl, .poll = fuse_file_poll, .llseek = noop_llseek, }; /************************************************************************** * CUSE channel initialization and destruction */ struct cuse_devinfo { const char *name; }; /** * cuse_parse_one - parse one key=value pair * @pp: i/o parameter for the current position * @end: points to one past the end of the packed string * @keyp: out parameter for key * @valp: out parameter for value * * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends * at @end - 1. This function parses one pair and set *@keyp to the * start of the key and *@valp to the start of the value. Note that * the original string is modified such that the key string is * terminated with '\0'. *@pp is updated to point to the next string. * * RETURNS: * 1 on successful parse, 0 on EOF, -errno on failure. */ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) { char *p = *pp; char *key, *val; while (p < end && *p == '\0') p++; if (p == end) return 0; if (end[-1] != '\0') { printk(KERN_ERR "CUSE: info not properly terminated\n"); return -EINVAL; } key = val = p; p += strlen(p); if (valp) { strsep(&val, "="); if (!val) val = key + strlen(key); key = strstrip(key); val = strstrip(val); } else key = strstrip(key); if (!strlen(key)) { printk(KERN_ERR "CUSE: zero length info key specified\n"); return -EINVAL; } *pp = p; *keyp = key; if (valp) *valp = val; return 1; } /** * cuse_parse_dev_info - parse device info * @p: device info string * @len: length of device info string * @devinfo: out parameter for parsed device info * * Parse @p to extract device info and store it into @devinfo. String * pointed to by @p is modified by parsing and @devinfo points into * them, so @p shouldn't be freed while @devinfo is in use. 
* * RETURNS: * 0 on success, -errno on failure. */ static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; char *uninitialized_var(key), *uninitialized_var(val); int rc; while (true) { rc = cuse_parse_one(&p, end, &key, &val); if (rc < 0) return rc; if (!rc) break; if (strcmp(key, "DEVNAME") == 0) devinfo->name = val; else printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n", key); } if (!devinfo->name || !strlen(devinfo->name)) { printk(KERN_ERR "CUSE: DEVNAME unspecified\n"); return -EINVAL; } return 0; } static void cuse_gendev_release(struct device *dev) { kfree(dev); } /** * cuse_process_init_reply - finish initializing CUSE channel * * This function creates the character device and sets up all the * required data structures for it. Please read the comment at the * top of this file for high level overview. */ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct cuse_conn *cc = fc_to_cc(fc), *pos; struct cuse_init_out *arg = req->out.args[0].value; struct page *page = req->pages[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; dev_t devt; int rc, i; if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { goto err; } fc->minor = arg->minor; fc->max_read = max_t(unsigned, arg->max_read, 4096); fc->max_write = max_t(unsigned, arg->max_write, 4096); /* parse init reply */ cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, &devinfo); if (rc) goto err; /* determine and reserve devt */ devt = MKDEV(arg->dev_major, arg->dev_minor); if (!MAJOR(devt)) rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name); else rc = register_chrdev_region(devt, 1, devinfo.name); if (rc) { printk(KERN_ERR "CUSE: failed to register chrdev region\n"); goto err; } /* devt determined, create device */ rc = -ENOMEM; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto err_region; device_initialize(dev); dev_set_uevent_suppress(dev, 1); dev->class = cuse_class; dev->devt = devt; dev->release = cuse_gendev_release; dev_set_drvdata(dev, cc); dev_set_name(dev, "%s", devinfo.name); mutex_lock(&cuse_lock); /* make sure the device-name is unique */ for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { list_for_each_entry(pos, &cuse_conntbl[i], list) if (!strcmp(dev_name(pos->dev), dev_name(dev))) goto err_unlock; } rc = device_add(dev); if (rc) goto err_unlock; /* register cdev */ rc = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto err_unlock; cdev->owner = THIS_MODULE; cdev->ops = &cuse_frontend_fops; rc = cdev_add(cdev, devt, 1); if (rc) goto err_cdev; cc->dev = dev; cc->cdev = cdev; /* make the device available */ list_add(&cc->list, cuse_conntbl_head(devt)); mutex_unlock(&cuse_lock); /* announce device availability */ dev_set_uevent_suppress(dev, 0); kobject_uevent(&dev->kobj, KOBJ_ADD); out: kfree(arg); __free_page(page); return; err_cdev: cdev_del(cdev); err_unlock: mutex_unlock(&cuse_lock); put_device(dev); err_region: unregister_chrdev_region(devt, 1); err: fuse_abort_conn(fc); goto out; } static int cuse_send_init(struct cuse_conn *cc) { int rc; struct fuse_req *req; struct page *page; struct fuse_conn *fc = &cc->fc; struct cuse_init_in *arg; void *outarg; BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); req = fuse_get_req_for_background(fc, 1); if (IS_ERR(req)) { rc = PTR_ERR(req); goto err; } rc = -ENOMEM; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto err_put_req; outarg = 
kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL); if (!outarg) goto err_free_page; arg = &req->misc.cuse_init_in; arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->flags |= CUSE_UNRESTRICTED_IOCTL; req->in.h.opcode = CUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(struct cuse_init_in); req->in.args[0].value = arg; req->out.numargs = 2; req->out.args[0].size = sizeof(struct cuse_init_out); req->out.args[0].value = outarg; req->out.args[1].size = CUSE_INIT_INFO_MAX; req->out.argvar = 1; req->out.argpages = 1; req->pages[0] = page; req->page_descs[0].length = req->out.args[1].size; req->num_pages = 1; req->end = cuse_process_init_reply; fuse_request_send_background(fc, req); return 0; err_free_page: __free_page(page); err_put_req: fuse_put_request(fc, req); err: return rc; } static void cuse_fc_release(struct fuse_conn *fc) { struct cuse_conn *cc = fc_to_cc(fc); kfree_rcu(cc, fc.rcu); } /** * cuse_channel_open - open method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being opened * * Userland CUSE server can create a CUSE device by opening /dev/cuse * and replying to the initialization request kernel sends. This * function is responsible for handling CUSE device initialization. * Because the fd opened by this function is used during * initialization, this function only creates cuse_conn and sends * init. The rest is delegated to a kthread. * * RETURNS: * 0 on success, -errno on failure. */ static int cuse_channel_open(struct inode *inode, struct file *file) { struct fuse_dev *fud; struct cuse_conn *cc; int rc; /* set up cuse_conn */ cc = kzalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; fuse_conn_init(&cc->fc); fud = fuse_dev_alloc(&cc->fc); if (!fud) { kfree(cc); return -ENOMEM; } INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; cc->fc.initialized = 1; rc = cuse_send_init(cc); if (rc) { fuse_dev_free(fud); return rc; } file->private_data = fud; return 0; } /** * cuse_channel_release - release method for /dev/cuse * @inode: inode for /dev/cuse * @file: file struct being closed * * Disconnect the channel, deregister CUSE device and initiate * destruction by putting the default reference. * * RETURNS: * 0 on success, -errno on failure. */ static int cuse_channel_release(struct inode *inode, struct file *file) { struct fuse_dev *fud = file->private_data; struct cuse_conn *cc = fc_to_cc(fud->fc); int rc; /* remove from the conntbl, no more access from this point on */ mutex_lock(&cuse_lock); list_del_init(&cc->list); mutex_unlock(&cuse_lock); /* remove device */ if (cc->dev) device_unregister(cc->dev); if (cc->cdev) { unregister_chrdev_region(cc->cdev->dev, 1); cdev_del(cc->cdev); } /* Base reference is now owned by "fud" */ fuse_conn_put(&cc->fc); rc = fuse_dev_release(inode, file); /* puts the base reference */ return rc; } static struct file_operations cuse_channel_fops; /* initialized during init */ /************************************************************************** * Misc stuff and module initializatiion * * CUSE exports the same set of attributes to sysfs as fusectl. 
*/ static ssize_t cuse_class_waiting_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cuse_conn *cc = dev_get_drvdata(dev); return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting)); } static DEVICE_ATTR(waiting, 0400, cuse_class_waiting_show, NULL); static ssize_t cuse_class_abort_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cuse_conn *cc = dev_get_drvdata(dev); fuse_abort_conn(&cc->fc); return count; } static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store); static struct attribute *cuse_class_dev_attrs[] = { &dev_attr_waiting.attr, &dev_attr_abort.attr, NULL, }; ATTRIBUTE_GROUPS(cuse_class_dev); static struct miscdevice cuse_miscdev = { .minor = CUSE_MINOR, .name = "cuse", .fops = &cuse_channel_fops, }; MODULE_ALIAS_MISCDEV(CUSE_MINOR); MODULE_ALIAS("devname:cuse"); static int __init cuse_init(void) { int i, rc; /* init conntbl */ for (i = 0; i < CUSE_CONNTBL_LEN; i++) INIT_LIST_HEAD(&cuse_conntbl[i]); /* inherit and extend fuse_dev_operations */ cuse_channel_fops = fuse_dev_operations; cuse_channel_fops.owner = THIS_MODULE; cuse_channel_fops.open = cuse_channel_open; cuse_channel_fops.release = cuse_channel_release; cuse_class = class_create(THIS_MODULE, "cuse"); if (IS_ERR(cuse_class)) return PTR_ERR(cuse_class); cuse_class->dev_groups = cuse_class_dev_groups; rc = misc_register(&cuse_miscdev); if (rc) { class_destroy(cuse_class); return rc; } return 0; } static void __exit cuse_exit(void) { misc_deregister(&cuse_miscdev); class_destroy(cuse_class); } module_init(cuse_init); module_exit(cuse_exit); MODULE_AUTHOR("Tejun Heo <tj@kernel.org>"); MODULE_DESCRIPTION("Character device in Userspace"); MODULE_LICENSE("GPL");
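The cuse_parse_one()/cuse_parse_devinfo() comments above describe the packed "key0=val0\0key1=val1\0" device-info string a CUSE server sends in its CUSE_INIT reply: the buffer must end in '\0', each pair is split in place on '=', and DEVNAME is the one key the kernel requires. The user-space sketch below parses a buffer with the same layout; it is not kernel code, it omits the whitespace stripping the kernel does, and the second pair ("MODE=0666") is invented purely to exercise the loop.

/* Parse a packed "key0=val0\0key1=val1\0" info buffer, illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_packed_info(char *buf, size_t len)
{
        char *p = buf, *end = buf + len;

        if (len == 0 || end[-1] != '\0')
                return -1;                      /* info must be NUL-terminated */

        while (p < end) {
                char *key, *val;

                if (*p == '\0') {               /* skip padding NULs between pairs */
                        p++;
                        continue;
                }
                key = p;
                p += strlen(p) + 1;             /* advance past this "key=val" string */
                val = strchr(key, '=');
                if (val)
                        *val++ = '\0';          /* split key and value in place */
                else
                        val = key + strlen(key); /* key with an empty value */
                printf("key=\"%s\" val=\"%s\"\n", key, val);
        }
        return 0;
}

int main(void)
{
        /* String literal with embedded NULs; it already ends in an explicit '\0'. */
        char info[] = "DEVNAME=mydev\0MODE=0666\0";

        if (parse_packed_info(info, sizeof(info) - 1) != 0) {
                fprintf(stderr, "malformed info string\n");
                return EXIT_FAILURE;
        }
        return 0;
}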
./CrossVul/dataset_final_sorted/CWE-399/c/good_1472_0
crossvul-cpp_data_bad_5625_0
/* * linux/fs/ext4/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * Directory entry file type support and forward compatibility hooks * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998 * Hash Tree Directory indexing (c) * Daniel Phillips, 2001 * Hash Tree Directory indexing porting * Christopher Li, 2002 * Hash Tree Directory indexing cleanup * Theodore Ts'o, 2002 */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/jbd2.h> #include <linux/time.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/quotaops.h> #include <linux/buffer_head.h> #include <linux/bio.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include <trace/events/ext4.h> /* * define how far ahead to read directories while searching them. */ #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b)) static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block, int *err) { struct buffer_head *bh; if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && ((inode->i_size >> 10) >= EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) { *err = -ENOSPC; return NULL; } *block = inode->i_size >> inode->i_sb->s_blocksize_bits; bh = ext4_bread(handle, inode, *block, 1, err); if (bh) { inode->i_size += inode->i_sb->s_blocksize; EXT4_I(inode)->i_disksize = inode->i_size; *err = ext4_journal_get_write_access(handle, bh); if (*err) { brelse(bh); bh = NULL; } } if (!bh && !(*err)) { *err = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return bh; } #ifndef assert #define assert(test) J_ASSERT(test) #endif #ifdef DX_DEBUG #define dxtrace(command) command #else #define dxtrace(command) #endif struct fake_dirent { __le32 inode; __le16 rec_len; u8 name_len; u8 file_type; }; struct dx_countlimit { __le16 limit; __le16 count; }; struct dx_entry { __le32 hash; __le32 block; }; /* * dx_root_info is laid out so that if it should somehow get overlaid by a * dirent the two low bits of the hash version will be zero. Therefore, the * hash version mod 4 should never be 0. Sincerely, the paranoia department. */ struct dx_root { struct fake_dirent dot; char dot_name[4]; struct fake_dirent dotdot; char dotdot_name[4]; struct dx_root_info { __le32 reserved_zero; u8 hash_version; u8 info_length; /* 8 */ u8 indirect_levels; u8 unused_flags; } info; struct dx_entry entries[0]; }; struct dx_node { struct fake_dirent fake; struct dx_entry entries[0]; }; struct dx_frame { struct buffer_head *bh; struct dx_entry *entries; struct dx_entry *at; }; struct dx_map_entry { u32 hash; u16 offs; u16 size; }; /* * This goes at the end of each htree block. 
*/ struct dx_tail { u32 dt_reserved; __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */ }; static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); static inline unsigned dx_get_hash(struct dx_entry *entry); static void dx_set_hash(struct dx_entry *entry, unsigned value); static unsigned dx_get_count(struct dx_entry *entries); static unsigned dx_get_limit(struct dx_entry *entries); static void dx_set_count(struct dx_entry *entries, unsigned value); static void dx_set_limit(struct dx_entry *entries, unsigned value); static unsigned dx_root_limit(struct inode *dir, unsigned infosize); static unsigned dx_node_limit(struct inode *dir); static struct dx_frame *dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame, int *err); static void dx_release(struct dx_frame *frames); static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry map[]); static void dx_sort_map(struct dx_map_entry *map, unsigned count); static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, struct dx_map_entry *offsets, int count, unsigned blocksize); static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize); static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block); static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *err); static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode); /* checksumming functions */ void initialize_dirent_tail(struct ext4_dir_entry_tail *t, unsigned int blocksize) { memset(t, 0, sizeof(struct ext4_dir_entry_tail)); t->det_rec_len = ext4_rec_len_to_disk( sizeof(struct ext4_dir_entry_tail), blocksize); t->det_reserved_ft = EXT4_FT_DIR_CSUM; } /* Walk through a dirent block to find a checksum "dirent" at the tail */ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct ext4_dir_entry *de) { struct ext4_dir_entry_tail *t; #ifdef PARANOID struct ext4_dir_entry *d, *top; d = de; top = (struct ext4_dir_entry *)(((void *)de) + (EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct ext4_dir_entry_tail))); while (d < top && d->rec_len) d = (struct ext4_dir_entry *)(((void *)d) + le16_to_cpu(d->rec_len)); if (d != top) return NULL; t = (struct ext4_dir_entry_tail *)d; #else t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb)); #endif if (t->det_reserved_zero1 || le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; return t; } static __le32 ext4_dirent_csum(struct inode *inode, struct ext4_dir_entry *dirent, int size) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); return cpu_to_le32(csum); } static void warn_no_space_for_csum(struct inode *inode) { ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for " "checksum. 
Please run e2fsck -D.", inode->i_ino); } int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return 0; } if (t->det_checksum != ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent)) return 0; return 1; } static void ext4_dirent_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; t = get_dirent_tail(inode, dirent); if (!t) { warn_no_space_for_csum(inode); return; } t->det_checksum = ext4_dirent_csum(inode, dirent, (void *)t - (void *)dirent); } int ext4_handle_dirty_dirent_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dirent, int *offset) { struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) count_offset = 8; else if (le16_to_cpu(dirent->rec_len) == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); if (le16_to_cpu(dp->rec_len) != EXT4_BLOCK_SIZE(inode->i_sb) - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || root->info_length != sizeof(struct dx_root_info)) return NULL; count_offset = 32; } else return NULL; if (offset) *offset = count_offset; return (struct dx_countlimit *)(((void *)dirent) + count_offset); } static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, int count_offset, int count, struct dx_tail *t) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); __u32 csum, old_csum; int size; size = count_offset + (count * sizeof(struct dx_entry)); old_csum = t->dt_checksum; t->dt_checksum = 0; csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); t->dt_checksum = old_csum; return cpu_to_le32(csum); } static int ext4_dx_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return 1; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); return 1; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return 1; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset, count, t)) return 0; return 1; } static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) { struct dx_countlimit *c; struct dx_tail *t; int count_offset, limit, count; if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) return; c = get_dx_countlimit(inode, dirent, &count_offset); if (!c) { EXT4_ERROR_INODE(inode, "dir seems corrupt? 
Run e2fsck -D."); return; } limit = le16_to_cpu(c->limit); count = le16_to_cpu(c->count); if (count_offset + (limit * sizeof(struct dx_entry)) > EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { warn_no_space_for_csum(inode); return; } t = (struct dx_tail *)(((struct dx_entry *)c) + limit); t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t); } static inline int ext4_handle_dirty_dx_node(handle_t *handle, struct inode *inode, struct buffer_head *bh) { ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); return ext4_handle_dirty_metadata(handle, inode, bh); } /* * p is at least 6 bytes before the end of page */ static inline struct ext4_dir_entry_2 * ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize) { return (struct ext4_dir_entry_2 *)((char *)p + ext4_rec_len_from_disk(p->rec_len, blocksize)); } /* * Future: use high four bits of block for coalesce-on-delete flags * Mask them off for now. */ static inline ext4_lblk_t dx_get_block(struct dx_entry *entry) { return le32_to_cpu(entry->block) & 0x00ffffff; } static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value) { entry->block = cpu_to_le32(value); } static inline unsigned dx_get_hash(struct dx_entry *entry) { return le32_to_cpu(entry->hash); } static inline void dx_set_hash(struct dx_entry *entry, unsigned value) { entry->hash = cpu_to_le32(value); } static inline unsigned dx_get_count(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->count); } static inline unsigned dx_get_limit(struct dx_entry *entries) { return le16_to_cpu(((struct dx_countlimit *) entries)->limit); } static inline void dx_set_count(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); } static inline void dx_set_limit(struct dx_entry *entries, unsigned value) { ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); } static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - EXT4_DIR_REC_LEN(2) - infosize; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } static inline unsigned dx_node_limit(struct inode *dir) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } /* * Debug */ #ifdef DX_DEBUG static void dx_show_index(char * label, struct dx_entry *entries) { int i, n = dx_get_count (entries); printk(KERN_DEBUG "%s index ", label); for (i = 0; i < n; i++) { printk("%x->%lu ", i ? 
dx_get_hash(entries + i) : 0, (unsigned long)dx_get_block(entries + i)); } printk("\n"); } struct stats { unsigned names; unsigned space; unsigned bcount; }; static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, int size, int show_names) { unsigned names = 0, space = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; printk("names: "); while ((char *) de < base + size) { if (de->inode) { if (show_names) { int len = de->name_len; char *name = de->name; while (len--) printk("%c", *name++); ext4fs_dirhash(de->name, de->name_len, &h); printk(":%x.%u ", h.hash, (unsigned) ((char *) de - base)); } space += EXT4_DIR_REC_LEN(de->name_len); names++; } de = ext4_next_entry(de, size); } printk("(%i)\n", names); return (struct stats) { names, space, 1 }; } struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count = dx_get_count(entries), names = 0, space = 0, i; unsigned bcount = 0; struct buffer_head *bh; int err; printk("%i indexed blocks...\n", count); for (i = 0; i < count; i++, entries++) { ext4_lblk_t block = dx_get_block(entries); ext4_lblk_t hash = i ? dx_get_hash(entries): 0; u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); names += stats.names; space += stats.space; bcount += stats.bcount; brelse(bh); } if (bcount) printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", levels ? "" : " ", names, space/bcount, (space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; } #endif /* DX_DEBUG */ /* * Probe for a directory leaf block to search. * * dx_probe can return ERR_BAD_DX_DIR, which means there was a format * error in the directory index, and the caller should fall back to * searching the directory normally. The callers of dx_probe **MUST** * check for this error code, and make sure it never gets reflected * back to userspace. 
*/ static struct dx_frame * dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct buffer_head *bh; struct dx_frame *frame = frame_in; u32 hash; frame->bh = NULL; if (!(bh = ext4_bread(NULL, dir, 0, 0, err))) { if (*err == 0) *err = ERR_BAD_DX_DIR; goto fail; } root = (struct dx_root *) bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", root->info.hash_version); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (d_name) ext4fs_dirhash(d_name->name, d_name->len, hinfo); hash = hinfo->hash; if (root->info.unused_flags & 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", root->info.unused_flags); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if ((indirect = root->info.indirect_levels) > 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", root->info.indirect_levels); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Root failed checksum"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } set_buffer_verified(bh); entries = (struct dx_entry *) (((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning(dir->i_sb, "dx entry: limit != root limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } dxtrace(printk("Look up %x", hash)); while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning(dir->i_sb, "dx entry: no count or count > limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p)/2; dxtrace(printk(".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } if (0) // linear search cross check { unsigned n = count - 1; at = entries; while (n--) { dxtrace(printk(",")); if (dx_get_hash(++at) > hash) { at--; break; } } assert (at == p - 1); } at = p - 1; dxtrace(printk(" %x->%u\n", at == entries? 
0: dx_get_hash(at), dx_get_block(at))); frame->bh = bh; frame->entries = entries; frame->at = at; if (!indirect--) return frame; if (!(bh = ext4_bread(NULL, dir, dx_get_block(at), 0, err))) { if (!(*err)) *err = ERR_BAD_DX_DIR; goto fail2; } at = entries = ((struct dx_node *) bh->b_data)->entries; if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Node failed checksum"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } set_buffer_verified(bh); if (dx_get_limit(entries) != dx_node_limit (dir)) { ext4_warning(dir->i_sb, "dx entry: limit != node limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } frame++; frame->bh = NULL; } fail2: while (frame >= frame_in) { brelse(frame->bh); frame--; } fail: if (*err == ERR_BAD_DX_DIR) ext4_warning(dir->i_sb, "Corrupt dir inode %lu, running e2fsck is " "recommended.", dir->i_ino); return NULL; } static void dx_release (struct dx_frame *frames) { if (frames[0].bh == NULL) return; if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels) brelse(frames[1].bh); brelse(frames[0].bh); } /* * This function increments the frame pointer to search the next leaf * block, and reads in the necessary intervening nodes if the search * should be necessary. Whether or not the search is necessary is * controlled by the hash parameter. If the hash value is even, then * the search is only continued if the next block starts with that * hash value. This is used if we are searching for a specific file. * * If the hash value is HASH_NB_ALWAYS, then always go to the next block. * * This function returns 1 if the caller should continue to search, * or 0 if it should not. If there is an error reading one of the * index blocks, it will a negative error code. * * If start_hash is non-null, it will be filled in with the starting * hash of the next page. */ static int ext4_htree_next_block(struct inode *dir, __u32 hash, struct dx_frame *frame, struct dx_frame *frames, __u32 *start_hash) { struct dx_frame *p; struct buffer_head *bh; int err, num_frames = 0; __u32 bhash; p = frame; /* * Find the next leaf page by incrementing the frame pointer. * If we run out of entries in the interior node, loop around and * increment pointer in the parent node. When we break out of * this loop, num_frames indicates the number of interior * nodes need to be read. */ while (1) { if (++(p->at) < p->entries + dx_get_count(p->entries)) break; if (p == frames) return 0; num_frames++; p--; } /* * If the hash is 1, then continue only if the next page has a * continuation hash of any value. This is used for readdir * handling. Otherwise, check to see if the hash matches the * desired contiuation hash. If it doesn't, return since * there's no point to read in the successive index pages. 
*/ bhash = dx_get_hash(p->at); if (start_hash) *start_hash = bhash; if ((hash & 1) == 0) { if ((bhash & ~1) != hash) return 0; } /* * If the hash is HASH_NB_ALWAYS, we always go to the next * block so no check is necessary */ while (num_frames--) { if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at), 0, &err))) { if (!err) { ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); return -EIO; } return err; /* Failure */ } if (!buffer_verified(bh) && !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { ext4_warning(dir->i_sb, "Node failed checksum"); return -EIO; } set_buffer_verified(bh); p++; brelse(p->bh); p->bh = bh; p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; } return 1; } /* * This function fills a red-black tree with information from a * directory block. It returns the number directory entries loaded * into the tree. If there is an error it is returned in err. */ static int htree_dirblock_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash) { struct buffer_head *bh; struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); if (!(bh = ext4_bread(NULL, dir, block, 0, &err))) { if (!err) { err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } return err; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) return -EIO; set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { /* On error, skip the f_pos to the next block. */ dir_file->f_pos = (dir_file->f_pos | (dir->i_sb->s_blocksize - 1)) + 1; brelse(bh); return count; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) continue; if (de->inode == 0) continue; if ((err = ext4_htree_store_dirent(dir_file, hinfo->hash, hinfo->minor_hash, de)) != 0) { brelse(bh); return err; } count++; } brelse(bh); return count; } /* * This function fills a red-black tree with information from a * directory. We start scanning the directory in hash order, starting * at start_hash and start_minor_hash. * * This function returns the number of entries inserted into the tree, * or a negative error code. 
*/ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext4_dir_entry_2 *de; struct dx_frame frames[2], *frame; struct inode *dir; ext4_lblk_t block; int count = 0; int ret, err; __u32 hashval; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = dir_file->f_path.dentry->d_inode; if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir, &hinfo, frames, &err); if (!frame) return err; /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0) goto errout; count++; } while (1) { block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " "next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); } static inline int search_dirblock(struct buffer_head *bh, struct inode *dir, const struct qstr *d_name, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, d_name, offset, res_dir); } /* * Directory block splitting, compacting */ /* * Create map of hash values, offsets, and sizes, stored at end of block. * Returns number of entries mapped. */ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) { int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { ext4fs_dirhash(de->name, de->name_len, &h); map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; map_tail->size = le16_to_cpu(de->rec_len); count++; cond_resched(); } /* XXX: do we need to check rec_len == 0 case? 
-Chris */ de = ext4_next_entry(de, blocksize); } return count; } /* Sort map by hash value */ static void dx_sort_map (struct dx_map_entry *map, unsigned count) { struct dx_map_entry *p, *q, *top = map + count - 1; int more; /* Combsort until bubble sort doesn't suck */ while (count > 2) { count = count*10/13; if (count - 9 < 2) /* 9, 10 -> 11 */ count = 11; for (p = top, q = p - count; q >= map; p--, q--) if (p->hash < q->hash) swap(*p, *q); } /* Garden variety bubble sort */ do { more = 0; q = top; while (q-- > map) { if (q[1].hash >= q[0].hash) continue; swap(*(q+1), *q); more = 1; } } while(more); } static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) { struct dx_entry *entries = frame->entries; struct dx_entry *old = frame->at, *new = old + 1; int count = dx_get_count(entries); assert(count < dx_get_limit(entries)); assert(old < entries + count); memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_set_hash(new, hash); dx_set_block(new, block); dx_set_count(entries, count + 1); } /* * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure. * * `len <= EXT4_NAME_LEN' is guaranteed by caller. * `de != NULL' is guaranteed by caller. */ static inline int ext4_match (int len, const char * const name, struct ext4_dir_entry_2 * de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, const struct qstr *d_name, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; int de_len; const char *name = d_name->name; int namelen = d_name->len; de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ if ((char *) de + namelen <= dlimit && ext4_match (namelen, name, de)) { /* found a match - just to be sure, do a full check */ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, offset)) return -1; *res_dir = de; return 1; } /* prevent looping on a bad block */ de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); if (de_len <= 0) return -1; offset += de_len; de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); } return 0; } static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, struct ext4_dir_entry *de) { struct super_block *sb = dir->i_sb; if (!is_dx(dir)) return 0; if (block == 0) return 1; if (de->inode == 0 && ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) == sb->s_blocksize) return 1; return 0; } /* * ext4_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * The returned buffer_head has ->b_count elevated. The caller is expected * to brelse() it when appropriate. 
*/ static struct buffer_head * ext4_find_entry (struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block, b; const u8 *name = d_name->name; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; ext4_lblk_t nblocks; int i, err; int namelen; *res_dir = NULL; sb = dir->i_sb; namelen = d_name->len; if (namelen > EXT4_NAME_LEN) return NULL; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, d_name, res_dir, &has_inline_data); if (has_inline_data) { if (inlined) *inlined = 1; return ret; } } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (bh || (err != ERR_BAD_DX_DIR)) return bh; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = ext4_getblk(NULL, dir, b++, 0, &err); bh_use[ra_max] = bh; if (bh) ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ EXT4_ERROR_INODE(dir, "reading directory lblock %lu", (unsigned long) block); brelse(bh); goto next; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); goto next; } set_buffer_verified(bh); i = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
*/ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); return ret; } static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *err) { struct super_block * sb = dir->i_sb; struct dx_hash_info hinfo; struct dx_frame frames[2], *frame; struct buffer_head *bh; ext4_lblk_t block; int retval; if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err))) return NULL; do { block = dx_get_block(frame->at); if (!(bh = ext4_bread(NULL, dir, block, 0, err))) { if (!(*err)) { *err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } goto errout; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); *err = -EIO; goto errout; } set_buffer_verified(bh); retval = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (retval == 1) { /* Success! */ dx_release(frames); return bh; } brelse(bh); if (retval == -1) { *err = ERR_BAD_DX_DIR; goto errout; } /* Check to see if we should continue to search */ retval = ext4_htree_next_block(dir, hinfo.hash, frame, frames, NULL); if (retval < 0) { ext4_warning(sb, "error reading index page in directory #%lu", dir->i_ino); *err = retval; goto errout; } } while (retval == 1); *err = -ENOENT; errout: dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); dx_release (frames); return NULL; } static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; struct ext4_dir_entry_2 *de; struct buffer_head *bh; if (dentry->d_name.len > EXT4_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); inode = NULL; if (bh) { __u32 ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(dir->i_sb, ino)) { EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EIO); } if (unlikely(ino == dir->i_ino)) { EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir", dentry->d_name.len, dentry->d_name.name); return ERR_PTR(-EIO); } inode = ext4_iget(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", ino); return ERR_PTR(-EIO); } } return d_splice_alias(inode, dentry); } struct dentry *ext4_get_parent(struct dentry *child) { __u32 ino; static const struct qstr dotdot = QSTR_INIT("..", 2); struct ext4_dir_entry_2 * de; struct buffer_head *bh; bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL); if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { EXT4_ERROR_INODE(child->d_inode, "bad parent inode number: %u", ino); return ERR_PTR(-EIO); } return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino)); } #define S_SHIFT 12 static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EXT4_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EXT4_FT_DIR, [S_IFCHR >> S_SHIFT] = EXT4_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EXT4_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EXT4_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EXT4_FT_SOCK, [S_IFLNK >> S_SHIFT] = EXT4_FT_SYMLINK, }; static inline void ext4_set_de_type(struct super_block *sb, struct ext4_dir_entry_2 *de, umode_t mode) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, 
EXT4_FEATURE_INCOMPAT_FILETYPE)) de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } /* * Move count entries from end of map between two memory locations. * Returns pointer to last entry moved. */ static struct ext4_dir_entry_2 * dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, unsigned blocksize) { unsigned rec_len = 0; while (count--) { struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); rec_len = EXT4_DIR_REC_LEN(de->name_len); memcpy (to, de, rec_len); ((struct ext4_dir_entry_2 *) to)->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); de->inode = 0; map++; to += rec_len; } return (struct ext4_dir_entry_2 *) (to - rec_len); } /* * Compact each dir entry in the range to the minimal rec_len. * Returns pointer to last entry in range. */ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) { struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; unsigned rec_len = 0; prev = to = de; while ((char*)de < base + blocksize) { next = ext4_next_entry(de, blocksize); if (de->inode && de->name_len) { rec_len = EXT4_DIR_REC_LEN(de->name_len); if (de > to) memmove(to, de, rec_len); to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); prev = to; to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len); } de = next; } return prev; } /* * Split a full leaf block to make room for a new dir entry. * Allocate a new block, and move entries so that they are approx. equally full. * Returns pointer to de in block into which the new entry will be inserted. */ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct buffer_head **bh,struct dx_frame *frame, struct dx_hash_info *hinfo, int *error) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count, continued; struct buffer_head *bh2; ext4_lblk_t newblock; u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; struct ext4_dir_entry_tail *t; int csum_size = 0; int err = 0, i; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) { brelse(*bh); *bh = NULL; goto errout; } BUFFER_TRACE(*bh, "get_write_access"); err = ext4_journal_get_write_access(handle, *bh); if (err) goto journal_error; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; data2 = bh2->b_data; /* create map in the end of data2 block */ map = (struct dx_map_entry *) (data2 + blocksize); count = dx_make_map((struct ext4_dir_entry_2 *) data1, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); /* Split the existing block in the middle, size-wise */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { /* is more than half of this entry in 2nd half of the block? 
*/ if (size + map[i].size/2 > blocksize/2) break; size += map[i].size; move++; } /* map index at which we will split */ split = count - move; hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", (unsigned long)dx_get_block(frame->at), hash2, split, count-split)); /* Fancy dance to stay within two buffers */ de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); de = dx_pack_dirents(data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de2, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(data2, blocksize); initialize_dirent_tail(t, blocksize); t = EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); /* Which block gets the new entry? */ if (hinfo->hash >= hash2) { swap(*bh, bh2); de = de2; } dx_insert_block(frame, hash2 + continued, newblock); err = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (err) goto journal_error; err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; brelse(bh2); dxtrace(dx_show_index("frame", frame->entries)); return de; journal_error: brelse(*bh); brelse(bh2); *bh = NULL; ext4_std_error(dir->i_sb, err); errout: *error = err; return NULL; } int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, const char *name, int namelen, struct ext4_dir_entry_2 **dest_de) { struct ext4_dir_entry_2 *de; unsigned short reclen = EXT4_DIR_REC_LEN(namelen); int nlen, rlen; unsigned int offset = 0; char *top; de = (struct ext4_dir_entry_2 *)buf; top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, buf, buf_size, offset)) return -EIO; if (ext4_match(namelen, name, de)) return -EEXIST; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? rlen - nlen : rlen) >= reclen) break; de = (struct ext4_dir_entry_2 *)((char *)de + rlen); offset += rlen; } if ((char *) de > top) return -ENOSPC; *dest_de = de; return 0; } void ext4_insert_dentry(struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, const char *name, int namelen) { int nlen, rlen; nlen = EXT4_DIR_REC_LEN(de->name_len); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen); de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); de = de1; } de->file_type = EXT4_FT_UNKNOWN; de->inode = cpu_to_le32(inode->i_ino); ext4_set_de_type(inode->i_sb, de, inode->i_mode); de->name_len = namelen; memcpy(de->name, name, namelen); } /* * Add a new entry into a directory (leaf) block. If de is non-NULL, * it points to a directory entry which is guaranteed to be large * enough for new directory entry. If de is NULL, then * add_dirent_to_buf will attempt search the directory block for * space. It will return -ENOSPC if no space is available, and -EIO * and -EEXIST if directory entry already exists. 
*/ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int blocksize = dir->i_sb->s_blocksize; unsigned short reclen; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); reclen = EXT4_DIR_REC_LEN(namelen); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, name, namelen, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(inode, de, blocksize, name, namelen); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. * * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ dir->i_mtime = dir->i_ctime = ext4_current_time(dir); ext4_update_dx_flag(dir); dir->i_version++; ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; } /* * This converts a one block unindexed directory to a 3 block indexed * directory, and adds the dentry to the indexed directory. */ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct inode *inode, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct buffer_head *bh2; struct dx_root *root; struct dx_frame frames[2], *frame; struct dx_entry *entries; struct ext4_dir_entry_2 *de, *de2; struct ext4_dir_entry_tail *t; char *data1, *top; unsigned len; int retval; unsigned blocksize; struct dx_hash_info hinfo; ext4_lblk_t block; struct fake_dirent *fde; int csum_size = 0; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); retval = ext4_journal_get_write_access(handle, bh); if (retval) { ext4_std_error(dir->i_sb, retval); brelse(bh); return retval; } root = (struct dx_root *) bh->b_data; /* The 0th block becomes the root, move the dirents out */ fde = &root->dotdot; de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); if ((char *) de >= (((char *) root) + blocksize)) { EXT4_ERROR_INODE(dir, "invalid rec_len for '..'"); brelse(bh); return -EIO; } len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block, &retval); if (!(bh2)) { brelse(bh); return retval; } ext4_set_inode_flag(dir, EXT4_INODE_INDEX); data1 = bh2->b_data; memcpy (data1, de, len); de = (struct ext4_dir_entry_2 *) data1; top = data1 + len; while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) de = de2; de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); if (csum_size) { t = 
EXT4_DIRENT_TAIL(data1, blocksize); initialize_dirent_tail(t, blocksize); } /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), blocksize); memset (&root->info, 0, sizeof(root->info)); root->info.info_length = sizeof(root->info); root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; entries = root->entries; dx_set_block(entries, 1); dx_set_count(entries, 1); dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); /* Initialize as for dx_probe */ hinfo.hash_version = root->info.hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; ext4fs_dirhash(name, namelen, &hinfo); frame = frames; frame->entries = entries; frame->at = entries; frame->bh = bh; bh = bh2; ext4_handle_dirty_dx_node(handle, dir, frame->bh); ext4_handle_dirty_dirent_node(handle, dir, bh); de = do_split(handle,dir, &bh, frame, &hinfo, &retval); if (!de) { /* * Even if the block split failed, we have to properly write * out all the changes we did so far. Otherwise we can end up * with corrupted filesystem. */ ext4_mark_inode_dirty(handle, dir); dx_release(frames); return retval; } dx_release(frames); retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); return retval; } /* * ext4_add_entry() * * adds a file entry to the specified directory, using the same * semantics as ext4_find_entry(). It returns NULL if it failed. * * NOTE!! The inode part of 'de' is left at 0 - which means you * may not sleep between calling this and putting something into * the entry, as someone else might have used it while you slept. */ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; struct super_block *sb; int retval; int dx_fallback=0; unsigned blocksize; ext4_lblk_t block, blocks; int csum_size = 0; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; blocksize = sb->s_blocksize; if (!dentry->d_name.len) return -EINVAL; if (ext4_has_inline_data(dir)) { retval = ext4_try_add_inline_entry(handle, dentry, inode); if (retval < 0) return retval; if (retval == 1) { retval = 0; return retval; } } if (is_dx(dir)) { retval = ext4_dx_add_entry(handle, dentry, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) return retval; ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); } blocks = dir->i_size >> sb->s_blocksize_bits; for (block = 0; block < blocks; block++) { if (!(bh = ext4_bread(handle, dir, block, 0, &retval))) { if (!retval) { retval = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return retval; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) return -EIO; set_buffer_verified(bh); retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); if (retval != -ENOSPC) { brelse(bh); return retval; } if (blocks == 1 && !dx_fallback && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) return make_indexed_dir(handle, dentry, inode, bh); brelse(bh); } bh = ext4_append(handle, dir, &block, &retval); if (!bh) return retval; de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; de->rec_len = 
ext4_rec_len_to_disk(blocksize - csum_size, blocksize); if (csum_size) { t = EXT4_DIRENT_TAIL(bh->b_data, blocksize); initialize_dirent_tail(t, blocksize); } retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); if (retval == 0) ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); return retval; } /* * Returns 0 for success, or a negative error value */ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode) { struct dx_frame frames[2], *frame; struct dx_entry *entries, *at; struct dx_hash_info hinfo; struct buffer_head *bh; struct inode *dir = dentry->d_parent->d_inode; struct super_block *sb = dir->i_sb; struct ext4_dir_entry_2 *de; int err; frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err); if (!frame) return err; entries = frame->entries; at = frame->at; if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err))) { if (!err) { err = -EIO; ext4_error(dir->i_sb, "Directory hole detected on inode %lu\n", dir->i_ino); } goto cleanup; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) goto journal_error; set_buffer_verified(bh); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) goto journal_error; err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); if (err != -ENOSPC) goto cleanup; /* Block full, should compress but for now just split */ dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", dx_get_count(entries), dx_get_limit(entries))); /* Need to split index? */ if (dx_get_count(entries) == dx_get_limit(entries)) { ext4_lblk_t newblock; unsigned icount = dx_get_count(entries); int levels = frame - frames; struct dx_entry *entries2; struct dx_node *node2; struct buffer_head *bh2; if (levels && (dx_get_count(frames->entries) == dx_get_limit(frames->entries))) { ext4_warning(sb, "Directory index full!"); err = -ENOSPC; goto cleanup; } bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize, sb->s_blocksize); BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; if (levels) { unsigned icount1 = icount/2, icount2 = icount - icount1; unsigned hash2 = dx_get_hash(entries + icount1); dxtrace(printk(KERN_DEBUG "Split index %i/%i\n", icount1, icount2)); BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ err = ext4_journal_get_write_access(handle, frames[0].bh); if (err) goto journal_error; memcpy((char *) entries2, (char *) (entries + icount1), icount2 * sizeof(struct dx_entry)); dx_set_count(entries, icount1); dx_set_count(entries2, icount2); dx_set_limit(entries2, dx_node_limit(dir)); /* Which index block gets the new entry? 
*/ if (at - entries >= icount1) { frame->at = at = at - entries - icount1 + entries2; frame->entries = entries = entries2; swap(frame->bh, bh2); } dx_insert_block(frames + 0, hash2, newblock); dxtrace(dx_show_index("node", frames[1].entries)); dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); err = ext4_handle_dirty_dx_node(handle, dir, bh2); if (err) goto journal_error; brelse (bh2); } else { dxtrace(printk(KERN_DEBUG "Creating second level index...\n")); memcpy((char *) entries2, (char *) entries, icount * sizeof(struct dx_entry)); dx_set_limit(entries2, dx_node_limit(dir)); /* Set up root */ dx_set_count(entries, 1); dx_set_block(entries + 0, newblock); ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1; /* Add new access path frame */ frame = frames + 1; frame->at = at = at - entries + entries2; frame->entries = entries = entries2; frame->bh = bh2; err = ext4_journal_get_write_access(handle, frame->bh); if (err) goto journal_error; } err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh); if (err) { ext4_std_error(inode->i_sb, err); goto cleanup; } } de = do_split(handle, dir, &bh, frame, &hinfo, &err); if (!de) goto cleanup; err = add_dirent_to_buf(handle, dentry, inode, de, bh); goto cleanup; journal_error: ext4_std_error(dir->i_sb, err); cleanup: if (bh) brelse(bh); dx_release(frames); return err; } /* * ext4_generic_delete_entry deletes a directory entry by merging it * with the previous entry */ int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, int buf_size, int csum_size) { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; int i; i = 0; pde = NULL; de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, bh->b_size, i)) return -EIO; if (de == de_del) { if (pde) pde->rec_len = ext4_rec_len_to_disk( ext4_rec_len_from_disk(pde->rec_len, blocksize) + ext4_rec_len_from_disk(de->rec_len, blocksize), blocksize); else de->inode = 0; dir->i_version++; return 0; } i += ext4_rec_len_from_disk(de->rec_len, blocksize); pde = de; de = ext4_next_entry(de, blocksize); } return -ENOENT; } static int ext4_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh) { int err, csum_size = 0; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; err = ext4_delete_inline_entry(handle, dir, de_del, bh, &has_inline_data); if (has_inline_data) return err; } if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (unlikely(err)) goto out; err = ext4_generic_delete_entry(handle, dir, de_del, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) goto out; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (unlikely(err)) goto out; return 0; out: if (err != -ENOENT) ext4_std_error(dir->i_sb, err); return err; } /* * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2, * since this indicates that nlinks count was previously 1. 
*/ static void ext4_inc_count(handle_t *handle, struct inode *inode) { inc_nlink(inode); if (is_dx(inode) && inode->i_nlink > 1) { /* limit is 16-bit i_links_count */ if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) { set_nlink(inode, 1); EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_DIR_NLINK); } } } /* * If a directory had nlink == 1, then we should let it be 1. This indicates * directory has >EXT4_LINK_MAX subdirs. */ static void ext4_dec_count(handle_t *handle, struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); } static int ext4_add_nondir(handle_t *handle, struct dentry *dentry, struct inode *inode) { int err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); unlock_new_inode(inode); d_instantiate(dentry, inode); return 0; } drop_nlink(inode); unlock_new_inode(inode); iput(inode); return err; } /* * By the time this is called, we already have created * the directory cache entry for the new file, but it * is so far negative - it has no inode. * * If the create succeeds, we fill in the inode information * with d_instantiate(). */ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { handle_t *handle; struct inode *inode; int err, retries = 0; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } static int ext4_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { handle_t *handle; struct inode *inode; int err, retries = 0; if (!new_valid_dev(rdev)) return -EINVAL; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &ext4_special_inode_operations; err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len) { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, "."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(parent_ino); de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - (csum_size + EXT4_DIR_REC_LEN(1)), blocksize); else de->rec_len = ext4_rec_len_to_disk( EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); return 
ext4_next_entry(de, blocksize); } static int ext4_init_new_dir(handle_t *handle, struct inode *dir, struct inode *inode) { struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; struct ext4_dir_entry_tail *t; unsigned int blocksize = dir->i_sb->s_blocksize; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { err = ext4_try_create_inline_dir(handle, dir, inode); if (err < 0 && err != -ENOSPC) goto out; if (!err) goto out; } inode->i_size = EXT4_I(inode)->i_disksize = blocksize; dir_block = ext4_bread(handle, inode, 0, 1, &err); if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) { if (!err) { err = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } goto out; } BUFFER_TRACE(dir_block, "get_write_access"); err = ext4_journal_get_write_access(handle, dir_block); if (err) goto out; de = (struct ext4_dir_entry_2 *)dir_block->b_data; ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); set_nlink(inode, 2); if (csum_size) { t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize); initialize_dirent_tail(t, blocksize); } BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) goto out; set_buffer_verified(dir_block); out: brelse(dir_block); return err; } static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { handle_t *handle; struct inode *inode; int err, retries = 0; if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, S_IFDIR | mode, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; err = ext4_init_new_dir(handle, dir, inode); if (err) goto out_clear_inode; err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); if (err) { out_clear_inode: clear_nlink(inode); unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; } ext4_inc_count(handle, dir); ext4_update_dx_flag(dir); err = ext4_mark_inode_dirty(handle, dir); if (err) goto out_clear_inode; unlock_new_inode(inode); d_instantiate(dentry, inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * routine to check that the specified directory is empty (for rmdir) */ static int empty_dir(struct inode *inode) { unsigned int offset; struct buffer_head *bh; struct ext4_dir_entry_2 *de, *de1; struct super_block *sb; int err = 0; if (ext4_has_inline_data(inode)) { int has_inline_data = 1; err = empty_inline_dir(inode, &has_inline_data); if (has_inline_data) return err; } sb = inode->i_sb; if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { if (err) EXT4_ERROR_INODE(inode, "error %d reading directory lblock 0", err); else ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no data block", inode->i_ino); return 1; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry 
*)bh->b_data)) { EXT4_ERROR_INODE(inode, "checksum error reading directory " "lblock 0"); return -EIO; } set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; de1 = ext4_next_entry(de, sb->s_blocksize); if (le32_to_cpu(de->inode) != inode->i_ino || !le32_to_cpu(de1->inode) || strcmp(".", de->name) || strcmp("..", de1->name)) { ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no `.' or `..'", inode->i_ino); brelse(bh); return 1; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); de = ext4_next_entry(de1, sb->s_blocksize); while (offset < inode->i_size) { if (!bh || (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { unsigned int lblock; err = 0; brelse(bh); lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); bh = ext4_bread(NULL, inode, lblock, 0, &err); if (!bh) { if (err) EXT4_ERROR_INODE(inode, "error %d reading directory " "lblock %u", err, lblock); else ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no data block", inode->i_ino); offset += sb->s_blocksize; continue; } if (!buffer_verified(bh) && !ext4_dirent_csum_verify(inode, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(inode, "checksum error " "reading directory lblock 0"); return -EIO; } set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; } if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, offset)) { de = (struct ext4_dir_entry_2 *)(bh->b_data + sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } if (le32_to_cpu(de->inode)) { brelse(bh); return 0; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); } brelse(bh); return 1; } /* ext4_orphan_add() links an unlinked or truncated inode into a list of * such inodes, starting at the superblock, in case we crash before the * file is closed/deleted, or in case the inode truncate spans multiple * transactions and the last transaction is not recovered after a crash. * * At filesystem recovery time, we walk this list deleting unlinked * inodes and truncating linked inodes in ext4_orphan_cleanup(). */ int ext4_orphan_add(handle_t *handle, struct inode *inode) { struct super_block *sb = inode->i_sb; struct ext4_iloc iloc; int err = 0, rc; if (!EXT4_SB(sb)->s_journal) return 0; mutex_lock(&EXT4_SB(sb)->s_orphan_lock); if (!list_empty(&EXT4_I(inode)->i_orphan)) goto out_unlock; /* * Orphan handling is only valid for files with data blocks * being truncated, or files being unlinked. Note that we either * hold i_mutex, or the inode can not be referenced from outside, * so i_nlink should not be bumped due to race */ J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_unlock; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out_unlock; /* * Due to previous errors inode may be already a part of on-disk * orphan list. If so skip on-disk list modification. */ if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <= (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) goto mem_insert; /* Insert this inode at the head of the on-disk orphan list... 
*/ NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); err = ext4_handle_dirty_super(handle, sb); rc = ext4_mark_iloc_dirty(handle, inode, &iloc); if (!err) err = rc; /* Only add to the head of the in-memory list if all the * previous operations succeeded. If the orphan_add is going to * fail (possibly taking the journal offline), we can't risk * leaving the inode on the orphan list: stray orphan-list * entries can cause panics at unmount time. * * This is safe: on error we're going to ignore the orphan list * anyway on the next recovery. */ mem_insert: if (!err) list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); jbd_debug(4, "orphan inode %lu will point to %d\n", inode->i_ino, NEXT_ORPHAN(inode)); out_unlock: mutex_unlock(&EXT4_SB(sb)->s_orphan_lock); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_orphan_del() removes an unlinked or truncated inode from the list * of such inodes stored on disk, because it is finally being cleaned up. */ int ext4_orphan_del(handle_t *handle, struct inode *inode) { struct list_head *prev; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi; __u32 ino_next; struct ext4_iloc iloc; int err = 0; if (!EXT4_SB(inode->i_sb)->s_journal) return 0; mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock); if (list_empty(&ei->i_orphan)) goto out; ino_next = NEXT_ORPHAN(inode); prev = ei->i_orphan.prev; sbi = EXT4_SB(inode->i_sb); jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino); list_del_init(&ei->i_orphan); /* If we're on an error path, we may not have a valid * transaction handle with which to update the orphan list on * disk, but we still need to remove the inode from the linked * list in memory. 
*/ if (!handle) goto out; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out_err; if (prev == &sbi->s_orphan) { jbd_debug(4, "superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto out_brelse; sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); err = ext4_handle_dirty_super(handle, inode->i_sb); } else { struct ext4_iloc iloc2; struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; jbd_debug(4, "orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) goto out_brelse; NEXT_ORPHAN(i_prev) = ino_next; err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2); } if (err) goto out_brelse; NEXT_ORPHAN(inode) = 0; err = ext4_mark_iloc_dirty(handle, inode, &iloc); out_err: ext4_std_error(inode->i_sb, err); out: mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock); return err; out_brelse: brelse(iloc.bh); goto out_err; } static int ext4_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle; /* Initialize quotas before so that eventual writes go in * separate transaction */ dquot_initialize(dir); dquot_initialize(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (!bh) goto end_rmdir; if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = dentry->d_inode; retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!empty_dir(inode)) goto end_rmdir; retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_rmdir; if (!EXT4_DIR_LINK_EMPTY(inode)) ext4_warning(inode->i_sb, "empty directory has too many links (%d)", inode->i_nlink); inode->i_version++; clear_nlink(inode); /* There's no need to set i_disksize: the fact that i_nlink is * zero will ensure that the right thing happens during any * recovery. 
*/ inode->i_size = 0; ext4_orphan_add(handle, inode); inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_dec_count(handle, dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); end_rmdir: ext4_journal_stop(handle); brelse(bh); return retval; } static int ext4_unlink(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle; trace_ext4_unlink_enter(dir, dentry); /* Initialize quotas before so that eventual writes go * in separate transaction */ dquot_initialize(dir); dquot_initialize(dentry->d_inode); handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (!bh) goto end_unlink; inode = dentry->d_inode; retval = -EIO; if (le32_to_cpu(de->inode) != inode->i_ino) goto end_unlink; if (!inode->i_nlink) { ext4_warning(inode->i_sb, "Deleting nonexistent file (%lu), %d", inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } retval = ext4_delete_entry(handle, dir, de, bh); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = ext4_current_time(dir); ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); drop_nlink(inode); if (!inode->i_nlink) ext4_orphan_add(handle, inode); inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); retval = 0; end_unlink: ext4_journal_stop(handle); brelse(bh); trace_ext4_unlink_exit(dentry, retval); return retval; } static int ext4_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { handle_t *handle; struct inode *inode; int l, err, retries = 0; int credits; l = strlen(symname)+1; if (l > dir->i_sb->s_blocksize) return -ENAMETOOLONG; dquot_initialize(dir); if (l > EXT4_N_BLOCKS * 4) { /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, * group descriptor, sb, inode block, quota blocks, and * possibly selinux xattr blocks. */ credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + EXT4_XATTR_TRANS_BLOCKS; } else { /* * Fast symlink. We have to add entry to directory * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS), * allocate new inode (bitmap, group descriptor, inode block, * quota blocks, sb is already counted in previous macros). */ credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); } retry: handle = ext4_journal_start(dir, credits); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO, &dentry->d_name, 0, NULL); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_stop; if (l > EXT4_N_BLOCKS * 4) { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); /* * We cannot call page_symlink() with transaction started * because it calls into ext4_write_begin() which can wait * for transaction commit if we are running out of space * and thus we deadlock. So we have to stop transaction now * and restart it when symlink contents is written. * * To keep fs consistent in case of crash, we have to put inode * to orphan list in the mean time. 
*/ drop_nlink(inode); err = ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (err) goto err_drop_inode; err = __page_symlink(inode, symname, l, 1); if (err) goto err_drop_inode; /* * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified */ handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto err_drop_inode; } set_nlink(inode, 1); err = ext4_orphan_del(handle, inode); if (err) { ext4_journal_stop(handle); clear_nlink(inode); goto err_drop_inode; } } else { /* clear the extent format for fast symlink */ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); inode->i_op = &ext4_fast_symlink_inode_operations; memcpy((char *)&EXT4_I(inode)->i_data, symname, l); inode->i_size = l-1; } EXT4_I(inode)->i_disksize = inode->i_size; err = ext4_add_nondir(handle, dentry, inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; err_drop_inode: unlock_new_inode(inode); iput(inode); return err; } static int ext4_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { handle_t *handle; struct inode *inode = old_dentry->d_inode; int err, retries = 0; if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; dquot_initialize(dir); retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode->i_ctime = ext4_current_time(inode); ext4_inc_count(handle, inode); ihold(inode); err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); } else { drop_nlink(inode); iput(inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; } /* * Try to find buffer head where contains the parent block. * It should be the inode block if it is inlined or the 1st block * if it is a normal dir. */ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct inode *inode, int *retval, struct ext4_dir_entry_2 **parent_de, int *inlined) { struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) { if (!*retval) { *retval = -EIO; ext4_error(inode->i_sb, "Directory hole detected on inode %lu\n", inode->i_ino); } return NULL; } *parent_de = ext4_next_entry( (struct ext4_dir_entry_2 *)bh->b_data, inode->i_sb->s_blocksize); return bh; } *inlined = 1; return ext4_get_first_inline_block(inode, parent_de, retval); } /* * Anybody can rename anything with this: the permission checks are left to the * higher-level routines. 
*/ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { handle_t *handle; struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh, *dir_bh; struct ext4_dir_entry_2 *old_de, *new_de; int retval, force_da_alloc = 0; int inlined = 0, new_inlined = 0; struct ext4_dir_entry_2 *parent_de; dquot_initialize(old_dir); dquot_initialize(new_dir); old_bh = new_bh = dir_bh = NULL; /* Initialize quotas before so that eventual writes go * in separate transaction */ if (new_dentry->d_inode) dquot_initialize(new_dentry->d_inode); handle = ext4_journal_start(old_dir, 2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) ext4_handle_sync(handle); old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL); /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process * and merrily kill the link to whatever was created under the * same name. Goodbye sticky bit ;-< */ old_inode = old_dentry->d_inode; retval = -ENOENT; if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino) goto end_rename; new_inode = new_dentry->d_inode; new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de, &new_inlined); if (new_bh) { if (!new_inode) { brelse(new_bh); new_bh = NULL; } } if (S_ISDIR(old_inode->i_mode)) { if (new_inode) { retval = -ENOTEMPTY; if (!empty_dir(new_inode)) goto end_rename; } retval = -EIO; dir_bh = ext4_get_first_dir_block(handle, old_inode, &retval, &parent_de, &inlined); if (!dir_bh) goto end_rename; if (!inlined && !buffer_verified(dir_bh) && !ext4_dirent_csum_verify(old_inode, (struct ext4_dir_entry *)dir_bh->b_data)) goto end_rename; set_buffer_verified(dir_bh); if (le32_to_cpu(parent_de->inode) != old_dir->i_ino) goto end_rename; retval = -EMLINK; if (!new_inode && new_dir != old_dir && EXT4_DIR_LINK_MAX(new_dir)) goto end_rename; BUFFER_TRACE(dir_bh, "get_write_access"); retval = ext4_journal_get_write_access(handle, dir_bh); if (retval) goto end_rename; } if (!new_bh) { retval = ext4_add_entry(handle, new_dentry, old_inode); if (retval) goto end_rename; } else { BUFFER_TRACE(new_bh, "get write access"); retval = ext4_journal_get_write_access(handle, new_bh); if (retval) goto end_rename; new_de->inode = cpu_to_le32(old_inode->i_ino); if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb, EXT4_FEATURE_INCOMPAT_FILETYPE)) new_de->file_type = old_de->file_type; new_dir->i_version++; new_dir->i_ctime = new_dir->i_mtime = ext4_current_time(new_dir); ext4_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); if (!new_inlined) { retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh); if (unlikely(retval)) { ext4_std_error(new_dir->i_sb, retval); goto end_rename; } } brelse(new_bh); new_bh = NULL; } /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ old_inode->i_ctime = ext4_current_time(old_inode); ext4_mark_inode_dirty(handle, old_inode); /* * ok, that's it */ if (le32_to_cpu(old_de->inode) != old_inode->i_ino || old_de->name_len != old_dentry->d_name.len || strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) || (retval = ext4_delete_entry(handle, old_dir, old_de, old_bh)) == -ENOENT) { /* old_de could have moved from under us during htree split, so * make sure that we are deleting the right entry. 
We might * also be pointing to a stale entry in the unused part of * old_bh so just checking inum and the name isn't enough. */ struct buffer_head *old_bh2; struct ext4_dir_entry_2 *old_de2; old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2, NULL); if (old_bh2) { retval = ext4_delete_entry(handle, old_dir, old_de2, old_bh2); brelse(old_bh2); } } if (retval) { ext4_warning(old_dir->i_sb, "Deleting old file (%lu), %d, error=%d", old_dir->i_ino, old_dir->i_nlink, retval); } if (new_inode) { ext4_dec_count(handle, new_inode); new_inode->i_ctime = ext4_current_time(new_inode); } old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir); ext4_update_dx_flag(old_dir); if (dir_bh) { parent_de->inode = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); if (!inlined) { if (is_dx(old_inode)) { retval = ext4_handle_dirty_dx_node(handle, old_inode, dir_bh); } else { retval = ext4_handle_dirty_dirent_node(handle, old_inode, dir_bh); } } else { retval = ext4_mark_inode_dirty(handle, old_inode); } if (retval) { ext4_std_error(old_dir->i_sb, retval); goto end_rename; } ext4_dec_count(handle, old_dir); if (new_inode) { /* checked empty_dir above, can't have another parent, * ext4_dec_count() won't work for many-linked dirs */ clear_nlink(new_inode); } else { ext4_inc_count(handle, new_dir); ext4_update_dx_flag(new_dir); ext4_mark_inode_dirty(handle, new_dir); } } ext4_mark_inode_dirty(handle, old_dir); if (new_inode) { ext4_mark_inode_dirty(handle, new_inode); if (!new_inode->i_nlink) ext4_orphan_add(handle, new_inode); if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC)) force_da_alloc = 1; } retval = 0; end_rename: brelse(dir_bh); brelse(old_bh); brelse(new_bh); ext4_journal_stop(handle); if (retval == 0 && force_da_alloc) ext4_alloc_da_blocks(old_inode); return retval; } /* * directories can handle most operations... */ const struct inode_operations ext4_dir_inode_operations = { .create = ext4_create, .lookup = ext4_lookup, .link = ext4_link, .unlink = ext4_unlink, .symlink = ext4_symlink, .mkdir = ext4_mkdir, .rmdir = ext4_rmdir, .mknod = ext4_mknod, .rename = ext4_rename, .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, .get_acl = ext4_get_acl, .fiemap = ext4_fiemap, }; const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, .get_acl = ext4_get_acl, };
./CrossVul/dataset_final_sorted/CWE-399/c/bad_5625_0
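The ext4_generic_delete_entry() routine in the file above removes a directory entry without moving any data: it folds the victim's rec_len into the previous slot (or clears the inode field when the victim is the first slot in the block). A minimal userspace sketch of that merge idea, assuming simplified structures: demo_dirent and demo_delete_entry() are invented names for illustration only, and the sketch deliberately ignores on-disk endianness, checksum tails and the full ext4_dir_entry_2 layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct ext4_dir_entry_2 (illustration only). */
struct demo_dirent {
	uint32_t inode;      /* 0 means "unused slot" */
	uint16_t rec_len;    /* distance in bytes to the next slot */
	uint8_t  name_len;
	char     name[255];
};

/*
 * Delete the entry at byte offset 'victim' inside a directory block of
 * 'blocksize' bytes by merging its rec_len into the preceding slot, the
 * same idea used by ext4_generic_delete_entry().  Returns 0 on success,
 * -1 if the offset is not reached by the rec_len chain.
 */
static int demo_delete_entry(char *block, unsigned blocksize, unsigned victim)
{
	struct demo_dirent *prev = NULL;
	unsigned off = 0;

	while (off < blocksize) {
		struct demo_dirent *de = (struct demo_dirent *)(block + off);

		if (de->rec_len == 0)		/* corrupted chain, bail out */
			return -1;
		if (off == victim) {
			if (prev)
				prev->rec_len += de->rec_len;	/* merge into predecessor */
			else
				de->inode = 0;			/* first slot: just clear it */
			return 0;
		}
		prev = de;
		off += de->rec_len;
	}
	return -1;
}

int main(void)
{
	char block[256];
	struct demo_dirent *d1 = (struct demo_dirent *)block;
	struct demo_dirent *d2;

	memset(block, 0, sizeof(block));
	d1->inode = 11; d1->name_len = 1; d1->rec_len = 16;
	memcpy(d1->name, "a", 1);

	d2 = (struct demo_dirent *)(block + d1->rec_len);
	d2->inode = 12; d2->name_len = 1;
	d2->rec_len = sizeof(block) - d1->rec_len;	/* last slot spans to block end */
	memcpy(d2->name, "b", 1);

	demo_delete_entry(block, sizeof(block), 16);
	printf("first rec_len now %u\n", (unsigned)d1->rec_len);	/* prints 256 */
	return 0;
}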
crossvul-cpp_data_good_5627_0
/* * drivers/net/veth.c * * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc * * Author: Pavel Emelianov <xemul@openvz.org> * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com> * */ #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <net/dst.h> #include <net/xfrm.h> #include <linux/veth.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" #define MIN_MTU 68 /* Min L3 MTU */ #define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */ #define MTU_PAD (ETH_HLEN + 4) /* Max difference between L2 and L3 size MTU */ struct veth_net_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long tx_dropped; unsigned long rx_dropped; }; struct veth_priv { struct net_device *peer; struct veth_net_stats __percpu *stats; unsigned ip_summed; }; /* * ethtool interface */ static struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "peer_ifindex" }, }; static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { cmd->supported = 0; cmd->advertising = 0; cmd->speed = SPEED_10000; cmd->duplex = DUPLEX_FULL; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; return 0; } static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->fw_version, "N/A"); } static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch(stringset) { case ETH_SS_STATS: memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; } } static int veth_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys); default: return -EOPNOTSUPP; } } static void veth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct veth_priv *priv; priv = netdev_priv(dev); data[0] = priv->peer->ifindex; } static u32 veth_get_rx_csum(struct net_device *dev) { struct veth_priv *priv; priv = netdev_priv(dev); return priv->ip_summed == CHECKSUM_UNNECESSARY; } static int veth_set_rx_csum(struct net_device *dev, u32 data) { struct veth_priv *priv; priv = netdev_priv(dev); priv->ip_summed = data ? 
CHECKSUM_UNNECESSARY : CHECKSUM_NONE; return 0; } static u32 veth_get_tx_csum(struct net_device *dev) { return (dev->features & NETIF_F_NO_CSUM) != 0; } static int veth_set_tx_csum(struct net_device *dev, u32 data) { if (data) dev->features |= NETIF_F_NO_CSUM; else dev->features &= ~NETIF_F_NO_CSUM; return 0; } static const struct ethtool_ops veth_ethtool_ops = { .get_settings = veth_get_settings, .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, .get_rx_csum = veth_get_rx_csum, .set_rx_csum = veth_set_rx_csum, .get_tx_csum = veth_get_tx_csum, .set_tx_csum = veth_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_strings = veth_get_strings, .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, }; /* * xmit */ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device *rcv = NULL; struct veth_priv *priv, *rcv_priv; struct veth_net_stats *stats, *rcv_stats; int length; priv = netdev_priv(dev); rcv = priv->peer; rcv_priv = netdev_priv(rcv); stats = this_cpu_ptr(priv->stats); rcv_stats = this_cpu_ptr(rcv_priv->stats); if (!(rcv->flags & IFF_UP)) goto tx_drop; if (dev->features & NETIF_F_NO_CSUM) skb->ip_summed = rcv_priv->ip_summed; length = skb->len + ETH_HLEN; if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) goto rx_drop; stats->tx_bytes += length; stats->tx_packets++; rcv_stats->rx_bytes += length; rcv_stats->rx_packets++; return NETDEV_TX_OK; tx_drop: kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; rx_drop: rcv_stats->rx_dropped++; return NETDEV_TX_OK; } /* * general routines */ static struct net_device_stats *veth_get_stats(struct net_device *dev) { struct veth_priv *priv; int cpu; struct veth_net_stats *stats, total = {0}; priv = netdev_priv(dev); for_each_possible_cpu(cpu) { stats = per_cpu_ptr(priv->stats, cpu); total.rx_packets += stats->rx_packets; total.tx_packets += stats->tx_packets; total.rx_bytes += stats->rx_bytes; total.tx_bytes += stats->tx_bytes; total.tx_dropped += stats->tx_dropped; total.rx_dropped += stats->rx_dropped; } dev->stats.rx_packets = total.rx_packets; dev->stats.tx_packets = total.tx_packets; dev->stats.rx_bytes = total.rx_bytes; dev->stats.tx_bytes = total.tx_bytes; dev->stats.tx_dropped = total.tx_dropped; dev->stats.rx_dropped = total.rx_dropped; return &dev->stats; } static int veth_open(struct net_device *dev) { struct veth_priv *priv; priv = netdev_priv(dev); if (priv->peer == NULL) return -ENOTCONN; if (priv->peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(priv->peer); } return 0; } static int veth_close(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); netif_carrier_off(dev); netif_carrier_off(priv->peer); return 0; } static int is_valid_veth_mtu(int new_mtu) { return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU); } static int veth_change_mtu(struct net_device *dev, int new_mtu) { if (!is_valid_veth_mtu(new_mtu)) return -EINVAL; dev->mtu = new_mtu; return 0; } static int veth_dev_init(struct net_device *dev) { struct veth_net_stats __percpu *stats; struct veth_priv *priv; stats = alloc_percpu(struct veth_net_stats); if (stats == NULL) return -ENOMEM; priv = netdev_priv(dev); priv->stats = stats; return 0; } static void veth_dev_free(struct net_device *dev) { struct veth_priv *priv; priv = netdev_priv(dev); free_percpu(priv->stats); free_netdev(dev); } static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, .ndo_stop = veth_close, .ndo_start_xmit = veth_xmit, 
.ndo_change_mtu = veth_change_mtu, .ndo_get_stats = veth_get_stats, .ndo_set_mac_address = eth_mac_addr, }; static void veth_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; dev->destructor = veth_dev_free; } /* * netlink interface */ static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (tb[IFLA_MTU]) { if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) return -EINVAL; } return 0; } static struct rtnl_link_ops veth_link_ops; static int veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { int err; struct net_device *peer; struct veth_priv *priv; char ifname[IFNAMSIZ]; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; struct ifinfomsg *ifmp; struct net *net; /* * create and register peer first */ if (data != NULL && data[VETH_INFO_PEER] != NULL) { struct nlattr *nla_peer; nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); err = nla_parse(peer_tb, IFLA_MAX, nla_data(nla_peer) + sizeof(struct ifinfomsg), nla_len(nla_peer) - sizeof(struct ifinfomsg), ifla_policy); if (err < 0) return err; err = veth_validate(peer_tb, NULL); if (err < 0) return err; tbp = peer_tb; } else { ifmp = NULL; tbp = tb; } if (tbp[IFLA_IFNAME]) nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); else snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); net = rtnl_link_get_net(src_net, tbp); if (IS_ERR(net)) return PTR_ERR(net); peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); } if (tbp[IFLA_ADDRESS] == NULL) random_ether_addr(peer->dev_addr); err = register_netdevice(peer); put_net(net); net = NULL; if (err < 0) goto err_register_peer; netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp); if (err < 0) goto err_configure_peer; /* * register dev last * * note, that since we've registered new device the dev's name * should be re-allocated */ if (tb[IFLA_ADDRESS] == NULL) random_ether_addr(dev->dev_addr); if (tb[IFLA_IFNAME]) nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); if (strchr(dev->name, '%')) { err = dev_alloc_name(dev, dev->name); if (err < 0) goto err_alloc_name; } err = register_netdevice(dev); if (err < 0) goto err_register_dev; netif_carrier_off(dev); /* * tie the deviced together */ priv = netdev_priv(dev); priv->peer = peer; priv = netdev_priv(peer); priv->peer = dev; return 0; err_register_dev: /* nothing to do */ err_alloc_name: err_configure_peer: unregister_netdevice(peer); return err; err_register_peer: free_netdev(peer); return err; } static void veth_dellink(struct net_device *dev, struct list_head *head) { struct veth_priv *priv; struct net_device *peer; priv = netdev_priv(dev); peer = priv->peer; unregister_netdevice_queue(dev, head); unregister_netdevice_queue(peer, head); } static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; static struct rtnl_link_ops veth_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct veth_priv), .setup = veth_setup, .validate = veth_validate, .newlink = veth_newlink, .dellink = veth_dellink, .policy = veth_policy, .maxtype = VETH_INFO_MAX, }; /* * init/fini */ static __init int veth_init(void) { return rtnl_link_register(&veth_link_ops); } static __exit void veth_exit(void) { 
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
./CrossVul/dataset_final_sorted/CWE-399/c/good_5627_0
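veth_get_stats() in the driver above folds per-CPU counter copies into a single total only when the statistics are read, so the transmit path can bump its own CPU's counters without any locking. A small userspace sketch of that aggregation pattern, assuming a fixed NR_CPUS and plain (non-atomic) counters; demo_net_stats and demo_fold_stats() are made-up names, and the fixed loop stands in for the kernel's for_each_possible_cpu().

#include <stdio.h>

#define NR_CPUS 4	/* assumption for the sketch */

struct demo_net_stats {
	unsigned long rx_packets, tx_packets;
	unsigned long rx_bytes, tx_bytes;
};

/* One counter block per CPU: writers touch only their own copy,
 * readers pay the cost of summing all copies. */
static struct demo_net_stats per_cpu_stats[NR_CPUS];

static struct demo_net_stats demo_fold_stats(void)
{
	struct demo_net_stats total = {0};
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		total.rx_packets += per_cpu_stats[cpu].rx_packets;
		total.tx_packets += per_cpu_stats[cpu].tx_packets;
		total.rx_bytes   += per_cpu_stats[cpu].rx_bytes;
		total.tx_bytes   += per_cpu_stats[cpu].tx_bytes;
	}
	return total;
}

int main(void)
{
	per_cpu_stats[0].tx_packets = 3;
	per_cpu_stats[2].tx_packets = 5;
	printf("tx_packets total: %lu\n", demo_fold_stats().tx_packets);	/* prints 8 */
	return 0;
}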
crossvul-cpp_data_bad_1677_1
/* * UDP over IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/ipv4/udp.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/init.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/raw.h> #include <net/tcp_states.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> #include <net/inet6_hashtables.h> #include <net/busy_poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <trace/events/skb.h> #include "udp_impl.h" static u32 udp6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) { static u32 udp6_ehash_secret __read_mostly; static u32 udp_ipv6_hash_secret __read_mostly; u32 lhash, fhash; net_get_random_once(&udp6_ehash_secret, sizeof(udp6_ehash_secret)); net_get_random_once(&udp_ipv6_hash_secret, sizeof(udp_ipv6_hash_secret)); lhash = (__force u32)laddr->s6_addr32[3]; fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret); return __inet6_ehashfn(lhash, lport, fhash, fport, udp_ipv6_hash_secret + net_hash_mix(net)); } int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); int sk2_ipv6only = inet_v6_ipv6only(sk2); int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); int addr_type2 = sk2_rcv_saddr6 ? 
ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) return (!sk2_ipv6only && (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr || sk->sk_rcv_saddr == sk2->sk_rcv_saddr)); if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) return 1; if (addr_type == IPV6_ADDR_ANY && !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED)) return 1; if (sk2_rcv_saddr6 && ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6)) return 1; return 0; } static u32 udp6_portaddr_hash(const struct net *net, const struct in6_addr *addr6, unsigned int port) { unsigned int hash, mix = net_hash_mix(net); if (ipv6_addr_any(addr6)) hash = jhash_1word(0, mix); else if (ipv6_addr_v4mapped(addr6)) hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); else hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); return hash ^ port; } int udp_v6_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); unsigned int hash2_partial = udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); } static void udp_v6_rehash(struct sock *sk) { u16 new_hash = udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } static inline int compute_score(struct sock *sk, struct net *net, unsigned short hnum, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { int score; struct inet_sock *inet; if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || sk->sk_family != PF_INET6) return -1; score = 0; inet = inet_sk(sk); if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return -1; score++; } if (!ipv6_addr_any(&sk->sk_v6_daddr)) { if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } return score; } #define SCORE2_MAX (1 + 1 + 1) static inline int compute_score2(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, int dif) { int score; struct inet_sock *inet; if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || sk->sk_family != PF_INET6) return -1; if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return -1; score = 0; inet = inet_sk(sk); if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&sk->sk_v6_daddr)) { if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } return score; } /* called with read_rcu_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned int hnum, int dif, struct udp_hslot *hslot2, unsigned int slot2) { struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; begin: result = NULL; badness = -1; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { result = sk; 
badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } else if (score == SCORE2_MAX) goto exact_match; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot2) goto begin; if (result) { exact_match: if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, daddr, hnum, dif) < badness)) { sock_put(result); goto begin; } } return result; } struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = -1; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, hnum, saddr, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp6_lib_lookup); static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; const struct ipv6hdr *iph = ipv6_hdr(skb); sk = skb_steal_sock(skb); if (unlikely(sk)) return sk; return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable); } struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif) { return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp6_lib_lookup); /* * This should be easy, if there is something there we * return it, otherwise we block. 
*/ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); int is_udp4; bool slow; if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len, addr_len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len, addr_len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; is_udp4 = (skb->protocol == htons(ETH_P_IP)); /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), msg, copied); else { err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udpv6_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) { if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); } sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. 
*/ if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); sin6->sin6_family = AF_INET6; sin6->sin6_port = udp_hdr(skb)->source; sin6->sin6_flowinfo = 0; if (is_udp4) { ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); sin6->sin6_scope_id = 0; } else { sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, inet6_iif(skb)); } *addr_len = sizeof(*sin6); } if (np->rxopt.all) ip6_datagram_recv_common_ctl(sk, msg, skb); if (is_udp4) { if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } else { if (np->rxopt.all) ip6_datagram_recv_specific_ctl(sk, msg, skb); } err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } else { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; } void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info, struct udp_table *udptable) { struct ipv6_pinfo *np; const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct in6_addr *saddr = &hdr->saddr; const struct in6_addr *daddr = &hdr->daddr; struct udphdr *uh = (struct udphdr *)(skb->data+offset); struct sock *sk; int err; struct net *net = dev_net(skb->dev); sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, inet6_iif(skb), udptable); if (!sk) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } if (type == ICMPV6_PKT_TOOBIG) { if (!ip6_sk_accept_pmtu(sk)) goto out; ip6_sk_update_pmtu(skb, sk, info); } if (type == NDISC_REDIRECT) { ip6_sk_redirect(skb, sk); goto out; } np = inet6_sk(sk); if (!icmpv6_err_convert(type, code, &err) && !np->recverr) goto out; if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) goto out; if (np->recverr) ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (!ipv6_addr_any(&sk->sk_v6_daddr)) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); sk_incoming_cpu_update(sk); } rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } return 0; } static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } static struct static_key udpv6_encap_needed __read_mostly; void udpv6_encap_enable(void) { if (!static_key_enabled(&udpv6_encap_needed)) static_key_slow_inc(&udpv6_encap_needed); } EXPORT_SYMBOL(udpv6_encap_enable); int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; if (static_key_false(&udpv6_encap_needed) && 
up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. * up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (skb->len > sizeof(struct udphdr) && encap_rcv) { int ret; /* Verify checksum before giving to encap */ if (udp_lib_checksum_complete(skb)) goto csum_error; ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter)) { if (udp_lib_checksum_complete(skb)) goto csum_error; } if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); goto drop; } skb_dst_drop(skb); bh_lock_sock(sk); rc = 0; if (!sock_owned_by_user(sk)) rc = __udpv6_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); return rc; csum_error: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, const struct in6_addr *loc_addr, __be16 rmt_port, const struct in6_addr *rmt_addr, int dif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net)) return false; if (udp_sk(sk)->udp_port_hash != hnum || sk->sk_family != PF_INET6 || (inet->inet_dport && inet->inet_dport != rmt_port) || (!ipv6_addr_any(&sk->sk_v6_daddr) && !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) || (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) return false; if (!inet6_mc_check(sk, loc_addr, rmt_addr)) return false; return true; } static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { struct sk_buff *skb1 = NULL; struct sock *sk; unsigned int i; for (i = 0; i < count; i++) { sk = stack[i]; if (likely(!skb1)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { atomic_inc(&sk->sk_drops); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0) skb1 = NULL; sock_put(sk); } if (unlikely(skb1)) kfree_skb(skb1); } static void udp6_csum_zero_error(struct sk_buff *skb) { /* RFC 2460 section 8.1 says that we SHOULD log * this error. Well, it is reasonable. 
*/ net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); } /* * Note: called only from the BH handler context, * so we don't need to lock the hashes. */ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, struct udp_table *udptable, int proto) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); struct hlist_nulls_node *node; unsigned short hnum = ntohs(uh->dest); struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); int dif = inet6_iif(skb); unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); bool inner_flushed = false; if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & udp_table.mask; hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; start_lookup: hslot = &udp_table.hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } spin_lock(&hslot->lock); sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { if (__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, uh->source, saddr, dif, hnum) && /* If zero checksum and no_check is not on for * the socket then skip it. */ (uh->check || udp_sk(sk)->no_check6_rx)) { if (unlikely(count == ARRAY_SIZE(stack))) { flush_stack(stack, count, skb, ~0); inner_flushed = true; count = 0; } stack[count++] = sk; sock_hold(sk); } } spin_unlock(&hslot->lock); /* Also lookup *:port if we are using hash2 and haven't done so yet. */ if (use_hash2 && hash2 != hash2_any) { hash2 = hash2_any; goto start_lookup; } if (count) { flush_stack(stack, count, skb, count - 1); } else { if (!inner_flushed) UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, proto == IPPROTO_UDPLITE); consume_skb(skb); } return 0; } int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct net *net = dev_net(skb->dev); struct sock *sk; struct udphdr *uh; const struct in6_addr *saddr, *daddr; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto discard; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. */ /* Check for jumbo payload */ if (ulen == 0) ulen = skb->len; if (ulen < sizeof(*uh)) goto short_packet; if (ulen < skb->len) { if (pskb_trim_rcsum(skb, ulen)) goto short_packet; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); } } if (udp6_csum_init(skb, uh, proto)) goto csum_error; /* * Multicast receive code */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, saddr, daddr, udptable, proto); /* Unicast */ /* * check socket cache ... must talk to Alan about his plans * for sock caches... i'll skip this for now. 
*/ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { int ret; if (!uh->check && !udp_sk(sk)->no_check6_rx) { sock_put(sk); udp6_csum_zero_error(skb); goto csum_error; } if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, ip6_compute_pseudo); ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } if (!uh->check) { udp6_csum_zero_error(skb); goto csum_error; } if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; if (udp_lib_checksum_complete(skb)) goto csum_error; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; short_packet: net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", proto == IPPROTO_UDPLITE ? "-Lite" : "", saddr, ntohs(uh->source), ulen, skb->len, daddr, ntohs(uh->dest)); goto discard; csum_error: UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } static __inline__ int udpv6_rcv(struct sk_buff *skb) { return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); } /* * Throw away all pending data and cancel the corking. Socket is locked. */ static void udp_v6_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending == AF_INET) udp_flush_pending_frames(sk); else if (up->pending) { up->len = 0; up->pending = 0; ip6_flush_pending_frames(sk); } } /** * udp6_hwcsum_outgoing - handle outgoing HW checksumming * @sk: socket we are sending on * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) */ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, int len) { unsigned int offset; struct udphdr *uh = udp_hdr(skb); struct sk_buff *frags = skb_shinfo(skb)->frag_list; __wsum csum = 0; if (!frags) { /* Only one fragment on the socket. 
*/ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); } else { /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); skb->ip_summed = CHECKSUM_NONE; do { csum = csum_add(csum, frags->csum); } while ((frags = frags->next)); uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } /* * Sending */ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6) { struct sock *sk = skb->sk; struct udphdr *uh; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; int offset = skb_transport_offset(skb); int len = skb->len - offset; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = fl6->fl6_sport; uh->dest = fl6->fl6_dport; uh->len = htons(len); uh->check = 0; if (is_udplite) csum = udplite_csum(skb); else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); goto send; } else csum = udp_csum(skb); /* add protocol-dependent pseudo-header */ uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip6_send_skb(skb); if (err) { if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } static int udp_v6_push_pending_frames(struct sock *sk) { struct sk_buff *skb; struct udp_sock *up = udp_sk(sk); struct flowi6 fl6; int err = 0; if (up->pending == AF_INET) return udp_push_pending_frames(sk); /* ip6_finish_skb will release the cork, so make a copy of * fl6 here. 
*/ fl6 = inet_sk(sk)->cork.fl.u.ip6; skb = ip6_finish_skb(sk); if (!skb) goto out; err = udp_v6_send_skb(skb, &fl6); out: up->len = 0; up->pending = 0; return err; } int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); struct in6_addr *daddr, *final_p, final; struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct flowi6 fl6; struct dst_entry *dst; int addr_len = msg->msg_namelen; int ulen = len; int hlimit = -1; int tclass = -1; int dontfrag = -1; int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int err; int connected = 0; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); /* destination address check */ if (sin6) { if (addr_len < offsetof(struct sockaddr, sa_data)) return -EINVAL; switch (sin6->sin6_family) { case AF_INET6: if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; daddr = &sin6->sin6_addr; break; case AF_INET: goto do_udp_sendmsg; case AF_UNSPEC: msg->msg_name = sin6 = NULL; msg->msg_namelen = addr_len = 0; daddr = NULL; break; default: return -EINVAL; } } else if (!up->pending) { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = &sk->sk_v6_daddr; } else daddr = NULL; if (daddr) { if (ipv6_addr_v4mapped(daddr)) { struct sockaddr_in sin; sin.sin_family = AF_INET; sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; sin.sin_addr.s_addr = daddr->s6_addr32[3]; msg->msg_name = &sin; msg->msg_namelen = sizeof(sin); do_udp_sendmsg: if (__ipv6_only_sock(sk)) return -ENETUNREACH; return udp_sendmsg(sk, msg, len); } } if (up->pending == AF_INET) return udp_sendmsg(sk, msg, len); /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). */ if (len > INT_MAX - sizeof(struct udphdr)) return -EMSGSIZE; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET6)) { release_sock(sk); return -EAFNOSUPPORT; } dst = NULL; goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); memset(&fl6, 0, sizeof(fl6)); if (sin6) { if (sin6->sin6_port == 0) return -EINVAL; fl6.fl6_dport = sin6->sin6_port; daddr = &sin6->sin6_addr; if (np->sndflow) { fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (!flowlabel) return -EINVAL; } } /* * Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) daddr = &sk->sk_v6_daddr; if (addr_len >= sizeof(struct sockaddr_in6) && sin6->sin6_scope_id && __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) fl6.flowi6_oif = sin6->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; fl6.fl6_dport = inet->inet_dport; daddr = &sk->sk_v6_daddr; fl6.flowlabel = np->flow_label; connected = 1; } if (!fl6.flowi6_oif) fl6.flowi6_oif = sk->sk_bound_dev_if; if (!fl6.flowi6_oif) fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; fl6.flowi6_mark = sk->sk_mark; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (!flowlabel) return -EINVAL; } if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; connected = 0; } if (!opt) opt = np->opt; if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) fl6.daddr = *daddr; else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) fl6.saddr = np->saddr; fl6.fl6_sport = inet->inet_sport; final_p = fl6_update_dst(&fl6, opt, &final); if (final_p) connected = 0; if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { fl6.flowi6_oif = np->mcast_oif; connected = 0; } else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; goto out; } if (hlimit < 0) hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); if (tclass < 0) tclass = np->tclass; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: /* Lockless fast path for the non-corking case */ if (!corkreq) { struct sk_buff *skb; skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, msg->msg_flags, dontfrag); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6); goto release_dst; } lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); net_dbg_ratelimited("udp cork app bug 2\n"); err = -EINVAL; goto out; } up->pending = AF_INET6; do_append_data: if (dontfrag < 0) dontfrag = np->dontfrag; up->len += ulen; err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) err = udp_v6_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; if (err > 0) err = np->recverr ? net_xmit_errno(err) : 0; release_sock(sk); release_dst: if (dst) { if (connected) { ip6_dst_store(sk, dst, ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? &sk->sk_v6_daddr : NULL, #ifdef CONFIG_IPV6_SUBTREES ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 
&np->saddr : #endif NULL); } else { dst_release(dst); } dst = NULL; } out: dst_release(dst); fl6_sock_release(flowlabel); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: dst_confirm(dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } void udpv6_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); lock_sock(sk); udp_v6_flush_pending_frames(sk); release_sock(sk); if (static_key_false(&udpv6_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } inet6_destroy_sock(sk); } /* * Socket option code for UDP */ int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); } #endif int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return ipv6_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } #endif static const struct inet6_protocol udpv6_protocol = { .handler = udpv6_rcv, .err_handler = udpv6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS int udp6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); } else { int bucket = ((struct udp_iter_state *)seq->private)->bucket; struct inet_sock *inet = inet_sk(v); __u16 srcp = ntohs(inet->inet_sport); __u16 destp = ntohs(inet->inet_dport); ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); } return 0; } static const struct file_operations udp6_afinfo_seq_fops = { .owner = THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct udp_seq_afinfo udp6_seq_afinfo = { .name = "udp6", .family = AF_INET6, .udp_table = &udp_table, .seq_fops = &udp6_afinfo_seq_fops, .seq_ops = { .show = udp6_seq_show, }, }; int __net_init udp6_proc_init(struct net *net) { return udp_proc_register(net, &udp6_seq_afinfo); } void udp6_proc_exit(struct net *net) { udp_proc_unregister(net, &udp6_seq_afinfo); } #endif /* 
CONFIG_PROC_FS */ void udp_v6_clear_sk(struct sock *sk, int size) { struct inet_sock *inet = inet_sk(sk); /* we do not want to clear pinet6 field, because of RCU lookups */ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); memset(&inet->pinet6 + 1, 0, size); } /* ------------------------------------------------------------------------ */ struct proto udpv6_prot = { .name = "UDPv6", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .destroy = udpv6_destroy_sock, .setsockopt = udpv6_setsockopt, .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, .backlog_rcv = __udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif .clear_sk = udp_v6_clear_sk, }; static struct inet_protosw udpv6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_UDP, .prot = &udpv6_prot, .ops = &inet6_dgram_ops, .flags = INET_PROTOSW_PERMANENT, }; int __init udpv6_init(void) { int ret; ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); if (ret) goto out; ret = inet6_register_protosw(&udpv6_protosw); if (ret) goto out_udpv6_protocol; out: return ret; out_udpv6_protocol: inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); goto out; } void udpv6_exit(void) { inet6_unregister_protosw(&udpv6_protosw); inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); }
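/*
 * Illustrative userspace sketch (not part of the kernel file above): the
 * simplest way to exercise the paths implemented here is an ordinary
 * AF_INET6 datagram socket. The helper name and port number below are
 * arbitrary examples; the calls themselves are the standard POSIX socket
 * API, which reaches udp_v6_get_port(), udpv6_sendmsg() and
 * udpv6_recvmsg() through the udpv6_prot operations registered above.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int udp6_loopback_example(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 addr;
	const char payload[] = "ping";
	char buf[64];

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_port = htons(5000);		/* arbitrary example port */
	addr.sin6_addr = in6addr_loopback;

	/* bind() selects the local port via udp_v6_get_port() */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;
	/* sendto() is handled in the kernel by udpv6_sendmsg() */
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;
	/* recvfrom() is handled by udpv6_recvmsg() */
	if (recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}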
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2002 Intel Corp. * Copyright (c) 2002 Nokia Corp. * * This is part of the SCTP Linux Kernel Implementation. * * These are the state functions for the state machine. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Mathew Kotowsky <kotowsky@sctp.org> * Sridhar Samudrala <samudrala@us.ibm.com> * Jon Grimm <jgrimm@us.ibm.com> * Hui Huang <hui.huang@nokia.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/slab.h> #include <net/sock.h> #include <net/inet_ecn.h> #include <linux/skbuff.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/structs.h> static struct sctp_packet *sctp_abort_pkt_new(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, const void *payload, size_t paylen); static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands); static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, const struct sctp_association *asoc, const struct sctp_chunk *chunk); static void sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_chunk *err_chunk); static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, sctp_cmd_seq_t *commands, __be16 error, int sk_err, const struct sctp_association *asoc, struct sctp_transport *transport); static sctp_disposition_t sctp_sf_abort_violation( struct net *net, const struct 
sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, sctp_cmd_seq_t *commands, const __u8 *payload, const size_t paylen); static sctp_disposition_t sctp_sf_violation_chunklen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_violation_paramlen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, void *ext, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_violation_ctsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_violation_chunk( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); static sctp_ierror_t sctp_sf_authenticate(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, struct sctp_chunk *chunk); static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands); /* Small helper function that checks if the chunk length * is of the appropriate length. The 'required_length' argument * is set to be the size of a specific chunk we are testing. * Return Values: 1 = Valid length * 0 = Invalid length * */ static inline int sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length) { __u16 chunk_length = ntohs(chunk->chunk_hdr->length); if (unlikely(chunk_length < required_length)) return 0; return 1; } /********************************************************** * These are the state functions for handling chunk events. **********************************************************/ /* * Process the final SHUTDOWN COMPLETE. * * Section: 4 (C) (diagram), 9.2 * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint * should stop the T2-shutdown timer and remove all knowledge of the * association (and thus the association enters the CLOSED state). * * Verification Tag: 8.5.1(C), sctpimpguide 2.41. * C) Rules for packet carrying SHUTDOWN COMPLETE: * ... * - The receiver of a SHUTDOWN COMPLETE shall accept the packet * if the Verification Tag field of the packet matches its own tag and * the T bit is not set * OR * it is set to its peer's tag and the T bit is set in the Chunk * Flags. * Otherwise, the receiver MUST silently discard the packet * and take no further action. An endpoint MUST ignore the * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_4_C(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* RFC 2960 6.10 Bundling * * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. 
*/ if (!chunk->singleton) return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* RFC 2960 10.2 SCTP-to-ULP * * H) SHUTDOWN COMPLETE notification * * When SCTP completes the shutdown procedures (section 9.2) this * notification is passed to the upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, 0, 0, 0, NULL, GFP_ATOMIC); if (ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint * will verify that it is in SHUTDOWN-ACK-SENT state, if it is * not the chunk should be discarded. If the endpoint is in * the SHUTDOWN-ACK-SENT state the endpoint should stop the * T2-shutdown timer and remove all knowledge of the * association (and thus the association enters the CLOSED * state). */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; } /* * Respond to a normal INIT chunk. * We are the side that is being asked for an association. * * Section: 5.1 Normal Establishment of an Association, B * B) "Z" shall respond immediately with an INIT ACK chunk. The * destination IP address of the INIT ACK MUST be set to the source * IP address of the INIT to which this INIT ACK is responding. In * the response, besides filling in other parameters, "Z" must set the * Verification Tag field to Tag_A, and also provide its own * Verification Tag (Tag_Z) in the Initiate Tag field. * * Verification Tag: Must be 0. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *repl; struct sctp_association *new_asoc; struct sctp_chunk *err_chunk; struct sctp_packet *packet; sctp_unrecognized_param_t *unk_param; int len; /* 6.10 Bundling * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. * * IG Section 2.11.2 * Furthermore, we require that the receiver of an INIT chunk MUST * enforce these rules by silently discarding an arriving packet * with an INIT chunk that is bundled with other chunks. */ if (!chunk->singleton) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* If the packet is an OOTB packet which is temporarily on the * control endpoint, respond with an ABORT. */ if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); } /* 3.1 A packet containing an INIT chunk MUST have a zero Verification * Tag. */ if (chunk->sctp_hdr->vtag != 0) return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); /* Make sure that the INIT chunk has a valid length. 
* Normally, this would cause an ABORT with a Protocol Violation * error, but since we don't have an association, we'll * just discard the packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* If the INIT is coming toward a closing socket, we'll send back * an ABORT. Essentially, this catches the race of INIT being * backlogged to the socket at the same time as the user issues close(). * Since the socket and all its associations are going away, we * can treat this as OOTB */ if (sctp_sstate(ep->base.sk, CLOSING)) return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); /* Verify the INIT chunk before processing it. */ err_chunk = NULL; if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { /* This chunk contains a fatal error. It is to be discarded. * Send an ABORT, with causes if there are any. */ if (err_chunk) { packet = sctp_abort_pkt_new(net, ep, asoc, arg, (__u8 *)(err_chunk->chunk_hdr) + sizeof(sctp_chunkhdr_t), ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); sctp_chunk_free(err_chunk); if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); return SCTP_DISPOSITION_CONSUME; } else { return SCTP_DISPOSITION_NOMEM; } } else { return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); } } /* Grab the INIT header. */ chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data; /* Tag the variable length parameters. */ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); if (!new_asoc) goto nomem; if (sctp_assoc_set_bind_addr_from_ep(new_asoc, sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0) goto nomem_init; /* The call, sctp_process_init(), can fail on memory allocation. */ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), (sctp_init_chunk_t *)chunk->chunk_hdr, GFP_ATOMIC)) goto nomem_init; /* B) "Z" shall respond immediately with an INIT ACK chunk. */ /* If there are errors that need to be reported for unknown parameters, * make sure to reserve enough room in the INIT ACK for them. */ len = 0; if (err_chunk) len = ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem_init; /* If there are errors that need to be reported for unknown parameters, * include them in the outgoing INIT ACK as "Unrecognized parameter" * parameter. */ if (err_chunk) { /* Get the "Unrecognized parameter" parameter(s) out of the * ERROR chunk generated by sctp_verify_init(). Since the * error cause code for "unknown parameter" and the * "Unrecognized parameter" type is the same, we can * construct the parameters in INIT ACK by copying the * ERROR causes over. */ unk_param = (sctp_unrecognized_param_t *) ((__u8 *)(err_chunk->chunk_hdr) + sizeof(sctp_chunkhdr_t)); /* Replace the cause code with the "Unrecognized parameter" * parameter type. */ sctp_addto_chunk(repl, len, unk_param); sctp_chunk_free(err_chunk); } sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); /* * Note: After sending out INIT ACK with the State Cookie parameter, * "Z" MUST NOT allocate any resources, nor keep any states for the * new association. Otherwise, "Z" will be vulnerable to resource * attacks. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; nomem_init: sctp_association_free(new_asoc); nomem: if (err_chunk) sctp_chunk_free(err_chunk); return SCTP_DISPOSITION_NOMEM; } /* * Respond to a normal INIT ACK chunk. * We are the side that is initiating the association. * * Section: 5.1 Normal Establishment of an Association, C * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init * timer and leave COOKIE-WAIT state. "A" shall then send the State * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start * the T1-cookie timer, and enter the COOKIE-ECHOED state. * * Note: The COOKIE ECHO chunk can be bundled with any pending outbound * DATA chunks, but it MUST be the first chunk in the packet and * until the COOKIE ACK is returned the sender MUST NOT send any * other packets to the peer. * * Verification Tag: 3.3.3 * If the value of the Initiate Tag in a received INIT ACK chunk is * found to be 0, the receiver MUST treat it as an error and close the * association by transmitting an ABORT. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_init_chunk_t *initchunk; struct sctp_chunk *err_chunk; struct sctp_packet *packet; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* 6.10 Bundling * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. */ if (!chunk->singleton) return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the INIT-ACK chunk has a valid length */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Grab the INIT header. */ chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; /* Verify the INIT chunk before processing it. */ err_chunk = NULL; if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { sctp_error_t error = SCTP_ERROR_NO_RESOURCE; /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes. If there are no causes, * then there wasn't enough memory. Just terminate * the association. */ if (err_chunk) { packet = sctp_abort_pkt_new(net, ep, asoc, arg, (__u8 *)(err_chunk->chunk_hdr) + sizeof(sctp_chunkhdr_t), ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); sctp_chunk_free(err_chunk); if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); error = SCTP_ERROR_INV_PARAM; } } /* SCTP-AUTH, Section 6.3: * It should be noted that if the receiver wants to tear * down an association in an authenticated way only, the * handling of malformed packets should not result in * tearing down the association. * * This means that if we only want to abort associations * in an authenticated way (i.e AUTH+ABORT), then we * can't destroy this association just because the packet * was malformed. 
*/ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc, chunk->transport); } /* Tag the variable length parameters. Note that we never * convert the parameters in an INIT chunk. */ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr; sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT, SCTP_PEER_INIT(initchunk)); /* Reset init error count upon receipt of INIT-ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); /* 5.1 C) "A" shall stop the T1-init timer and leave * COOKIE-WAIT state. "A" shall then ... start the T1-cookie * timer, and enter the COOKIE-ECHOED state. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); /* SCTP-AUTH: generate the association shared keys so that * we can potentially sign the COOKIE-ECHO. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL()); /* 5.1 C) "A" shall then send the State Cookie received in the * INIT ACK chunk in a COOKIE ECHO chunk, ... */ /* If there are any errors to report, send the ERROR chunk generated * for unknown parameters as well. */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO, SCTP_CHUNK(err_chunk)); return SCTP_DISPOSITION_CONSUME; } /* * Respond to a normal COOKIE ECHO chunk. * We are the side that is being asked for an association. * * Section: 5.1 Normal Establishment of an Association, D * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply * with a COOKIE ACK chunk after building a TCB and moving to * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK * chunk MUST be the first chunk in the packet. * * IMPLEMENTATION NOTE: An implementation may choose to send the * Communication Up notification to the SCTP user upon reception * of a valid COOKIE ECHO chunk. * * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules * D) Rules for packet carrying a COOKIE ECHO * * - When sending a COOKIE ECHO, the endpoint MUST use the value of the * Initial Tag received in the INIT ACK. * * - The receiver of a COOKIE ECHO follows the procedures in Section 5. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_association *new_asoc; sctp_init_chunk_t *peer_init; struct sctp_chunk *repl; struct sctp_ulpevent *ev, *ai_ev = NULL; int error = 0; struct sctp_chunk *err_chk_p; struct sock *sk; /* If the packet is an OOTB packet which is temporarily on the * control endpoint, respond with an ABORT. */ if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); } /* Make sure that the COOKIE_ECHO chunk has a valid length. * In this case, we check that we have enough for at least a * chunk header. More detailed verification is done * in sctp_unpack_cookie(). 
*/ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* If the endpoint is not listening or if the number of associations * on the TCP-style socket exceeds the max backlog, respond with an * ABORT. */ sk = ep->base.sk; if (!sctp_sstate(sk, LISTENING) || (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); /* "Decode" the chunk. We have no optional parameters so we * are in good shape. */ chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t))) goto nomem; /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint * "Z" will reply with a COOKIE ACK chunk after building a TCB * and moving to the ESTABLISHED state. */ new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, &err_chk_p); /* FIXME: * If the re-build failed, what is the proper error path * from here? * * [We should abort the association. --piggy] */ if (!new_asoc) { /* FIXME: Several errors are possible. A bad cookie should * be silently discarded, but think about logging it too. */ switch (error) { case -SCTP_IERROR_NOMEM: goto nomem; case -SCTP_IERROR_STALE_COOKIE: sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, err_chk_p); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); case -SCTP_IERROR_BAD_SIG: default: return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } } /* Delay state machine commands until later. * * Re-building the bind address for the association is done in * sctp_unpack_cookie() already. */ /* This is a brand-new association, so these are not yet side * effects--it is safe to run them here. */ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; if (!sctp_process_init(new_asoc, chunk, &chunk->subh.cookie_hdr->c.peer_addr, peer_init, GFP_ATOMIC)) goto nomem_init; /* SCTP-AUTH: Now that we've populated the required fields in * sctp_process_init(), set up the association shared keys as * necessary so that we can potentially authenticate the ACK. */ error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC); if (error) goto nomem_init; /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo * is supposed to be authenticated and we have to do delayed * authentication. We've just recreated the association using * the information in the cookie and now it's much easier to * do the authentication. 
*/ if (chunk->auth_chunk) { struct sctp_chunk auth; sctp_ierror_t ret; /* Make sure that we and the peer are AUTH capable */ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* Set up our fake chunk so that we can process it */ auth.skb = chunk->auth_chunk; auth.asoc = chunk->asoc; auth.sctp_hdr = chunk->sctp_hdr; auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk, sizeof(sctp_chunkhdr_t)); skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t)); auth.transport = chunk->transport; ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); if (ret != SCTP_IERROR_NO_ERROR) { sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } } repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem_init; /* RFC 2960 5.1 Normal Establishment of an Association * * D) IMPLEMENTATION NOTE: An implementation may choose to * send the Communication Up notification to the SCTP user * upon reception of a valid COOKIE ECHO chunk. */ ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; /* Sockets API Draft Section 5.3.1.6 * When a peer sends an Adaptation Layer Indication parameter, SCTP * delivers this notification to inform the application of the * peer's requested adaptation layer. */ if (new_asoc->peer.adaptation_ind) { ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc, GFP_ATOMIC); if (!ai_ev) goto nomem_aiev; } /* Add all the state machine commands now since we've created * everything. This way we don't introduce memory corruption * during side-effect processing and correctly count established * associations. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); /* This will send the COOKIE ACK */ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); /* Queue the ASSOC_CHANGE event */ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); /* Send up the Adaptation Layer Indication event */ if (ai_ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ai_ev)); return SCTP_DISPOSITION_CONSUME; nomem_aiev: sctp_ulpevent_free(ev); nomem_ev: sctp_chunk_free(repl); nomem_init: sctp_association_free(new_asoc); nomem: return SCTP_DISPOSITION_NOMEM; } /* * Respond to a normal COOKIE ACK chunk. * We are the side that is initiating the association. * * RFC 2960 5.1 Normal Establishment of an Association * * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie * timer. It may also notify its ULP about the successful * establishment of the association with a Communication Up * notification (see Section 10). * * Verification Tag: * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. 
*/ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Verify that the chunk length for the COOKIE-ACK is OK. * If we don't do this, any bundled chunks may be junked. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Reset init error count upon receipt of COOKIE-ACK, * to avoid problems with the management of this * counter in stale cookie situations when a transition back * from the COOKIE-ECHOED state to the COOKIE-WAIT * state is performed. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); /* RFC 2960 5.1 Normal Establishment of an Association * * E) Upon reception of the COOKIE ACK, endpoint "A" will move * from the COOKIE-ECHOED state to the ESTABLISHED state, * stopping the T1-cookie timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); /* It may also notify its ULP about the successful * establishment of the association with a Communication Up * notification (see Section 10). */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, NULL, GFP_ATOMIC); if (!ev) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); /* Sockets API Draft Section 5.3.1.6 * When a peer sends an Adaptation Layer Indication parameter, SCTP * delivers this notification to inform the application of the * peer's requested adaptation layer. */ if (asoc->peer.adaptation_ind) { ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); if (!ev) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); } return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* Generate and send out a heartbeat packet. */ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; struct sctp_chunk *reply; /* Send a heartbeat to our peer. */ reply = sctp_make_heartbeat(asoc, transport); if (!reply) return SCTP_DISPOSITION_NOMEM; /* Set rto_pending indicating that an RTT measurement * is started with this heartbeat chunk. */ sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING, SCTP_TRANSPORT(transport)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; } /* Generate a HEARTBEAT packet on the given transport. 
*/ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; if (asoc->overall_error_count >= asoc->max_retrans) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_DELETE_TCB; } /* Section 3.3.5. * The Sender-specific Heartbeat Info field should normally include * information about the sender's current time when this HEARTBEAT * chunk is sent and the destination transport address to which this * HEARTBEAT is sent (see Section 8.3). */ if (transport->param_flags & SPP_HB_ENABLE) { if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type, arg, commands)) return SCTP_DISPOSITION_NOMEM; /* Set transport error counter and association error counter * when sending heartbeat. */ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT, SCTP_TRANSPORT(transport)); } sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE, SCTP_TRANSPORT(transport)); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE, SCTP_TRANSPORT(transport)); return SCTP_DISPOSITION_CONSUME; } /* * Process an heartbeat request. * * Section: 8.3 Path Heartbeat * The receiver of the HEARTBEAT should immediately respond with a * HEARTBEAT ACK that contains the Heartbeat Information field copied * from the received HEARTBEAT chunk. * * Verification Tag: 8.5 Verification Tag [Normal verification] * When receiving an SCTP packet, the endpoint MUST ensure that the * value in the Verification Tag field of the received SCTP packet * matches its own Tag. If the received Verification Tag value does not * match the receiver's own tag value, the receiver shall silently * discard the packet and shall not process it any further except for * those cases listed in Section 8.5.1 below. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_paramhdr_t *param_hdr; struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; size_t paylen = 0; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the HEARTBEAT chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* 8.3 The receiver of the HEARTBEAT should immediately * respond with a HEARTBEAT ACK that contains the Heartbeat * Information field copied from the received HEARTBEAT chunk. 
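 *
 * Before echoing it back we sanity-check the embedded parameter: the
 * usable payload is the chunk length minus the 4-byte chunk header, and
 * a Heartbeat Info parameter that claims to be longer than that payload
 * is rejected as a parameter-length protocol violation.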
*/ chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr; paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); if (ntohs(param_hdr->length) > paylen) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, param_hdr, commands); if (!pskb_pull(chunk->skb, paylen)) goto nomem; reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen); if (!reply) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Process the returning HEARTBEAT ACK. * * Section: 8.3 Path Heartbeat * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT * should clear the error counter of the destination transport * address to which the HEARTBEAT was sent, and mark the destination * transport address as active if it is not so marked. The endpoint may * optionally report to the upper layer when an inactive destination * address is marked as active due to the reception of the latest * HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also * clear the association overall error count as well (as defined * in section 8.1). * * The receiver of the HEARTBEAT ACK should also perform an RTT * measurement for that destination transport address using the time * value carried in the HEARTBEAT ACK chunk. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; union sctp_addr from_addr; struct sctp_transport *link; sctp_sender_hb_info_t *hbinfo; unsigned long max_interval; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) + sizeof(sctp_sender_hb_info_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; /* Make sure that the length of the parameter is what we expect */ if (ntohs(hbinfo->param_hdr.length) != sizeof(sctp_sender_hb_info_t)) { return SCTP_DISPOSITION_DISCARD; } from_addr = hbinfo->daddr; link = sctp_assoc_lookup_paddr(asoc, &from_addr); /* This should never happen, but lets log it if so. */ if (unlikely(!link)) { if (from_addr.sa.sa_family == AF_INET6) { net_warn_ratelimited("%s association %p could not find address %pI6\n", __func__, asoc, &from_addr.v6.sin6_addr); } else { net_warn_ratelimited("%s association %p could not find address %pI4\n", __func__, asoc, &from_addr.v4.sin_addr.s_addr); } return SCTP_DISPOSITION_DISCARD; } /* Validate the 64-bit random nonce. */ if (hbinfo->hb_nonce != link->hb_nonce) return SCTP_DISPOSITION_DISCARD; max_interval = link->hbinterval + link->rto; /* Check if the timestamp looks valid. 
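 * A HEARTBEAT ACK is only trusted if its echoed timestamp is not in the
 * future and is no older than one heartbeat interval plus the current
 * RTO for that transport; anything outside this window is discarded.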
*/ if (time_after(hbinfo->sent_at, jiffies) || time_after(jiffies, hbinfo->sent_at + max_interval)) { pr_debug("%s: HEARTBEAT ACK with invalid timestamp received " "for transport:%p\n", __func__, link); return SCTP_DISPOSITION_DISCARD; } /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of * the HEARTBEAT should clear the error counter of the * destination transport address to which the HEARTBEAT was * sent and mark the destination transport address as active if * it is not so marked. */ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link)); return SCTP_DISPOSITION_CONSUME; } /* Helper function to send out an abort for the restart * condition. */ static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa, struct sctp_chunk *init, sctp_cmd_seq_t *commands) { int len; struct sctp_packet *pkt; union sctp_addr_param *addrparm; struct sctp_errhdr *errhdr; struct sctp_endpoint *ep; char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)]; struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family); /* Build the error on the stack. We are way to malloc crazy * throughout the code today. */ errhdr = (struct sctp_errhdr *)buffer; addrparm = (union sctp_addr_param *)errhdr->variable; /* Copy into a parm format. */ len = af->to_addr_param(ssa, addrparm); len += sizeof(sctp_errhdr_t); errhdr->cause = SCTP_ERROR_RESTART; errhdr->length = htons(len); /* Assign to the control socket. */ ep = sctp_sk(net->sctp.ctl_sock)->ep; /* Association is NULL since this may be a restart attack and we * want to send back the attacker's vtag. */ pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len); if (!pkt) goto out; sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); /* Discard the rest of the inbound packet. */ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); out: /* Even if there is no memory, treat as a failure so * the packet will get dropped. */ return 0; } static bool list_has_sctp_addr(const struct list_head *list, union sctp_addr *ipaddr) { struct sctp_transport *addr; list_for_each_entry(addr, list, transports) { if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) return true; } return false; } /* A restart is occurring, check to make sure no new addresses * are being added as we may be under a takeover attack. */ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, const struct sctp_association *asoc, struct sctp_chunk *init, sctp_cmd_seq_t *commands) { struct net *net = sock_net(new_asoc->base.sk); struct sctp_transport *new_addr; int ret = 1; /* Implementor's Guide - Section 5.2.2 * ... * Before responding the endpoint MUST check to see if the * unexpected INIT adds new addresses to the association. If new * addresses are added to the association, the endpoint MUST respond * with an ABORT.. */ /* Search through all current addresses and make sure * we aren't adding any new ones. */ list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, transports) { if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, &new_addr->ipaddr)) { sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init, commands); ret = 0; break; } } /* Return success if all addresses were found. */ return ret; } /* Populate the verification/tie tags based on overlapping INIT * scenario. * * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state. 
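 *
 * In brief, mirroring the switch below:
 *   COOKIE-WAIT:    my_vtag = my_ttag = existing my_vtag, peer_ttag = 0
 *   COOKIE-ECHOED:  my_vtag = my_ttag = existing my_vtag,
 *                   peer_ttag = existing peer_vtag
 *   other states:   keep the new my_vtag; my_ttag = existing my_vtag,
 *                   peer_ttag = existing peer_vtag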
*/ static void sctp_tietags_populate(struct sctp_association *new_asoc, const struct sctp_association *asoc) { switch (asoc->state) { /* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */ case SCTP_STATE_COOKIE_WAIT: new_asoc->c.my_vtag = asoc->c.my_vtag; new_asoc->c.my_ttag = asoc->c.my_vtag; new_asoc->c.peer_ttag = 0; break; case SCTP_STATE_COOKIE_ECHOED: new_asoc->c.my_vtag = asoc->c.my_vtag; new_asoc->c.my_ttag = asoc->c.my_vtag; new_asoc->c.peer_ttag = asoc->c.peer_vtag; break; /* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED, * COOKIE-WAIT and SHUTDOWN-ACK-SENT */ default: new_asoc->c.my_ttag = asoc->c.my_vtag; new_asoc->c.peer_ttag = asoc->c.peer_vtag; break; } /* Other parameters for the endpoint SHOULD be copied from the * existing parameters of the association (e.g. number of * outbound streams) into the INIT ACK and cookie. */ new_asoc->rwnd = asoc->rwnd; new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams; new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams; new_asoc->c.initial_tsn = asoc->c.initial_tsn; } /* * Compare vtag/tietag values to determine unexpected COOKIE-ECHO * handling action. * * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists. * * Returns value representing action to be taken. These action values * correspond to Action/Description values in RFC 2960, Table 2. */ static char sctp_tietags_compare(struct sctp_association *new_asoc, const struct sctp_association *asoc) { /* In this case, the peer may have restarted. */ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && (asoc->c.peer_vtag != new_asoc->c.peer_vtag) && (asoc->c.my_vtag == new_asoc->c.my_ttag) && (asoc->c.peer_vtag == new_asoc->c.peer_ttag)) return 'A'; /* Collision case B. */ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) || (0 == asoc->c.peer_vtag))) { return 'B'; } /* Collision case D. */ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && (asoc->c.peer_vtag == new_asoc->c.peer_vtag)) return 'D'; /* Collision case C. */ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && (asoc->c.peer_vtag == new_asoc->c.peer_vtag) && (0 == new_asoc->c.my_ttag) && (0 == new_asoc->c.peer_ttag)) return 'C'; /* No match to any of the special cases; discard this packet. */ return 'E'; } /* Common helper routine for both duplicate and simulataneous INIT * chunk handling. */ static sctp_disposition_t sctp_sf_do_unexpected_init( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_disposition_t retval; struct sctp_chunk *chunk = arg; struct sctp_chunk *repl; struct sctp_association *new_asoc; struct sctp_chunk *err_chunk; struct sctp_packet *packet; sctp_unrecognized_param_t *unk_param; int len; /* 6.10 Bundling * An endpoint MUST NOT bundle INIT, INIT ACK or * SHUTDOWN COMPLETE with any other chunks. * * IG Section 2.11.2 * Furthermore, we require that the receiver of an INIT chunk MUST * enforce these rules by silently discarding an arriving packet * with an INIT chunk that is bundled with other chunks. */ if (!chunk->singleton) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* 3.1 A packet containing an INIT chunk MUST have a zero Verification * Tag. */ if (chunk->sctp_hdr->vtag != 0) return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); /* Make sure that the INIT chunk has a valid length. * In this case, we generate a protocol violation since we have * an association established. 
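 * The minimum acceptable size here is sizeof(sctp_init_chunk_t), i.e.
 * the 4-byte chunk header plus the 16-byte fixed INIT header, before
 * any variable-length parameters.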
*/ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Grab the INIT header. */ chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; /* Tag the variable length parameters. */ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); /* Verify the INIT chunk before processing it. */ err_chunk = NULL; if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes if there is any. */ if (err_chunk) { packet = sctp_abort_pkt_new(net, ep, asoc, arg, (__u8 *)(err_chunk->chunk_hdr) + sizeof(sctp_chunkhdr_t), ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); retval = SCTP_DISPOSITION_CONSUME; } else { retval = SCTP_DISPOSITION_NOMEM; } goto cleanup; } else { return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); } } /* * Other parameters for the endpoint SHOULD be copied from the * existing parameters of the association (e.g. number of * outbound streams) into the INIT ACK and cookie. * FIXME: We are copying parameters from the endpoint not the * association. */ new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); if (!new_asoc) goto nomem; if (sctp_assoc_set_bind_addr_from_ep(new_asoc, sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0) goto nomem; /* In the outbound INIT ACK the endpoint MUST copy its current * Verification Tag and Peers Verification tag into a reserved * place (local tie-tag and per tie-tag) within the state cookie. */ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), (sctp_init_chunk_t *)chunk->chunk_hdr, GFP_ATOMIC)) goto nomem; /* Make sure no new addresses are being added during the * restart. Do not do this check for COOKIE-WAIT state, * since there are no peer addresses to check against. * Upon return an ABORT will have been sent if needed. */ if (!sctp_state(asoc, COOKIE_WAIT)) { if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { retval = SCTP_DISPOSITION_CONSUME; goto nomem_retval; } } sctp_tietags_populate(new_asoc, asoc); /* B) "Z" shall respond immediately with an INIT ACK chunk. */ /* If there are errors need to be reported for unknown parameters, * make sure to reserve enough room in the INIT ACK for them. */ len = 0; if (err_chunk) { len = ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); } repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem; /* If there are errors need to be reported for unknown parameters, * include them in the outgoing INIT ACK as "Unrecognized parameter" * parameter. */ if (err_chunk) { /* Get the "Unrecognized parameter" parameter(s) out of the * ERROR chunk generated by sctp_verify_init(). Since the * error cause code for "unknown parameter" and the * "Unrecognized parameter" type is the same, we can * construct the parameters in INIT ACK by copying the * ERROR causes over. */ unk_param = (sctp_unrecognized_param_t *) ((__u8 *)(err_chunk->chunk_hdr) + sizeof(sctp_chunkhdr_t)); /* Replace the cause code with the "Unrecognized parameter" * parameter type. 
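 * (This copy-over works because, in RFC 4960, the "Unrecognized
 * Parameters" error cause code and the "Unrecognized Parameter"
 * parameter type share the same value, 8.)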
*/ sctp_addto_chunk(repl, len, unk_param); } sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); /* * Note: After sending out INIT ACK with the State Cookie parameter, * "Z" MUST NOT allocate any resources for this new association. * Otherwise, "Z" will be vulnerable to resource attacks. */ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); retval = SCTP_DISPOSITION_CONSUME; return retval; nomem: retval = SCTP_DISPOSITION_NOMEM; nomem_retval: if (new_asoc) sctp_association_free(new_asoc); cleanup: if (err_chunk) sctp_chunk_free(err_chunk); return retval; } /* * Handle simultaneous INIT. * This means we started an INIT and then we got an INIT request from * our peer. * * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B) * This usually indicates an initialization collision, i.e., each * endpoint is attempting, at about the same time, to establish an * association with the other endpoint. * * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an * endpoint MUST respond with an INIT ACK using the same parameters it * sent in its original INIT chunk (including its Verification Tag, * unchanged). These original parameters are combined with those from the * newly received INIT chunk. The endpoint shall also generate a State * Cookie with the INIT ACK. The endpoint uses the parameters sent in its * INIT to calculate the State Cookie. * * After that, the endpoint MUST NOT change its state, the T1-init * timer shall be left running and the corresponding TCB MUST NOT be * destroyed. The normal procedures for handling State Cookies when * a TCB exists will resolve the duplicate INITs to a single association. * * For an endpoint that is in the COOKIE-ECHOED state it MUST populate * its Tie-Tags with the Tag information of itself and its peer (see * section 5.2.2 for a description of the Tie-Tags). * * Verification Tag: Not explicit, but an INIT can not have a valid * verification tag, so we skip the check. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Call helper to do the real work for both simulataneous and * duplicate INIT chunk handling. */ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); } /* * Handle duplicated INIT messages. These are usually delayed * restransmissions. * * Section: 5.2.2 Unexpected INIT in States Other than CLOSED, * COOKIE-ECHOED and COOKIE-WAIT * * Unless otherwise stated, upon reception of an unexpected INIT for * this association, the endpoint shall generate an INIT ACK with a * State Cookie. In the outbound INIT ACK the endpoint MUST copy its * current Verification Tag and peer's Verification Tag into a reserved * place within the state cookie. We shall refer to these locations as * the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet * containing this INIT ACK MUST carry a Verification Tag value equal to * the Initiation Tag found in the unexpected INIT. And the INIT ACK * MUST contain a new Initiation Tag (randomly generated see Section * 5.3.1). Other parameters for the endpoint SHOULD be copied from the * existing parameters of the association (e.g. number of outbound * streams) into the INIT ACK and cookie. 
* * After sending out the INIT ACK, the endpoint shall take no further * actions, i.e., the existing association, including its current state, * and the corresponding TCB MUST NOT be changed. * * Note: Only when a TCB exists and the association is not in a COOKIE- * WAIT state are the Tie-Tags populated. For a normal association INIT * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be * set to 0 (indicating that no previous TCB existed). The INIT ACK and * State Cookie are populated as specified in section 5.2.1. * * Verification Tag: Not specified, but an INIT has no way of knowing * what the verification tag could be, so we ignore it. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Call helper to do the real work for both simulataneous and * duplicate INIT chunk handling. */ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); } /* * Unexpected INIT-ACK handler. * * Section 5.2.3 * If an INIT ACK received by an endpoint in any state other than the * COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk. * An unexpected INIT ACK usually indicates the processing of an old or * duplicated INIT chunk. */ sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Per the above section, we'll discard the chunk if we have an * endpoint. If this is an OOTB INIT-ACK, treat it as such. */ if (ep == sctp_sk(net->sctp.ctl_sock)->ep) return sctp_sf_ootb(net, ep, asoc, type, arg, commands); else return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); } /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A') * * Section 5.2.4 * A) In this case, the peer may have restarted. */ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) { sctp_init_chunk_t *peer_init; struct sctp_ulpevent *ev; struct sctp_chunk *repl; struct sctp_chunk *err; sctp_disposition_t disposition; /* new_asoc is a brand-new association, so these are not yet * side effects--it is safe to run them here. */ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, GFP_ATOMIC)) goto nomem; /* Make sure no new addresses are being added during the * restart. Though this is a pretty complicated attack * since you'd have to get inside the cookie. */ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { return SCTP_DISPOSITION_CONSUME; } /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes * the peer has restarted (Action A), it MUST NOT setup a new * association but instead resend the SHUTDOWN ACK and send an ERROR * chunk with a "Cookie Received while Shutting Down" error cause to * its peer. 
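 *
 * That special case is handled first below via sctp_sf_do_9_2_reshutack()
 * plus an SCTP_ERROR_COOKIE_IN_SHUTDOWN operational error.  Only when we
 * are not in SHUTDOWN-ACK-SENT do we run the restart path proper: purge
 * the outqueue and ASCONF state, send a COOKIE ACK, report SCTP_RESTART
 * to the ULP and update the association from the cookie.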
*/ if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) { disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc, SCTP_ST_CHUNK(chunk->chunk_hdr->type), chunk, commands); if (SCTP_DISPOSITION_NOMEM == disposition) goto nomem; err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_COOKIE_IN_SHUTDOWN, NULL, 0, 0); if (err) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err)); return SCTP_DISPOSITION_CONSUME; } /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked * data. Consider the optional choice of resending of this data. */ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue * and ASCONF-ACK cache. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL()); repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem; /* Report association restart to upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); if (sctp_state(asoc, SHUTDOWN_PENDING) && (sctp_sstate(asoc->base.sk, CLOSING) || sock_flag(asoc->base.sk, SOCK_DEAD))) { /* if were currently in SHUTDOWN_PENDING, but the socket * has been closed by user, don't transition to ESTABLISHED. * Instead trigger SHUTDOWN bundled with COOKIE_ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, SCTP_ST_CHUNK(0), NULL, commands); } else { sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); } return SCTP_DISPOSITION_CONSUME; nomem_ev: sctp_chunk_free(repl); nomem: return SCTP_DISPOSITION_NOMEM; } /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B') * * Section 5.2.4 * B) In this case, both sides may be attempting to start an association * at about the same time but the peer endpoint started its INIT * after responding to the local endpoint's INIT */ /* This case represents an initialization collision. */ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) { sctp_init_chunk_t *peer_init; struct sctp_chunk *repl; /* new_asoc is a brand-new association, so these are not yet * side effects--it is safe to run them here. */ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, GFP_ATOMIC)) goto nomem; /* Update the content of current association. 
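 * For collision case B we adopt the peer's parameters from new_asoc,
 * move straight to ESTABLISHED and answer with a COOKIE ACK; the COMM_UP
 * and adaptation-indication notifications are queued as side effects
 * because the real association id has not been assigned yet.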
*/ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); /* RFC 2960 5.1 Normal Establishment of an Association * * D) IMPLEMENTATION NOTE: An implementation may choose to * send the Communication Up notification to the SCTP user * upon reception of a valid COOKIE ECHO chunk. * * Sadly, this needs to be implemented as a side-effect, because * we are not guaranteed to have set the association id of the real * association and so these notifications need to be delayed until * the association id is allocated. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP)); /* Sockets API Draft Section 5.3.1.6 * When a peer sends a Adaptation Layer Indication parameter , SCTP * delivers this notification to inform the application that of the * peers requested adaptation layer. * * This also needs to be done as a side effect for the same reason as * above. */ if (asoc->peer.adaptation_ind) sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL()); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C') * * Section 5.2.4 * C) In this case, the local endpoint's cookie has arrived late. * Before it arrived, the local endpoint sent an INIT and received an * INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag * but a new tag of its own. */ /* This case represents an initialization collision. */ static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) { /* The cookie should be silently discarded. * The endpoint SHOULD NOT change states and should leave * any timers running. */ return SCTP_DISPOSITION_DISCARD; } /* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D') * * Section 5.2.4 * * D) When both local and remote tags match the endpoint should always * enter the ESTABLISHED state, if it has not already done so. */ /* This case represents an initialization collision. */ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) { struct sctp_ulpevent *ev = NULL, *ai_ev = NULL; struct sctp_chunk *repl; /* Clarification from Implementor's Guide: * D) When both local and remote tags match the endpoint should * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state. * It should stop any cookie timer that may be running and send * a COOKIE ACK. */ /* Don't accidentally move back into established state. 
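 * Only an association still short of ESTABLISHED stops the T1-cookie
 * timer, transitions and generates COMM_UP; one that is already
 * established simply has its COOKIE ACK resent.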
*/ if (asoc->state < SCTP_STATE_ESTABLISHED) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); /* RFC 2960 5.1 Normal Establishment of an Association * * D) IMPLEMENTATION NOTE: An implementation may choose * to send the Communication Up notification to the * SCTP user upon reception of a valid COOKIE * ECHO chunk. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, NULL, GFP_ATOMIC); if (!ev) goto nomem; /* Sockets API Draft Section 5.3.1.6 * When a peer sends a Adaptation Layer Indication parameter, * SCTP delivers this notification to inform the application * that of the peers requested adaptation layer. */ if (asoc->peer.adaptation_ind) { ai_ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); if (!ai_ev) goto nomem; } } repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); if (ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); if (ai_ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ai_ev)); return SCTP_DISPOSITION_CONSUME; nomem: if (ai_ev) sctp_ulpevent_free(ai_ev); if (ev) sctp_ulpevent_free(ev); return SCTP_DISPOSITION_NOMEM; } /* * Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying * chunk was retransmitted and then delayed in the network. * * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists * * Verification Tag: None. Do cookie validation. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_disposition_t retval; struct sctp_chunk *chunk = arg; struct sctp_association *new_asoc; int error = 0; char action; struct sctp_chunk *err_chk_p; /* Make sure that the chunk has a valid length from the protocol * perspective. In this case check to make sure we have at least * enough for the chunk header. Cookie length verification is * done later. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* "Decode" the chunk. We have no optional parameters so we * are in good shape. */ chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t))) goto nomem; /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie * of a duplicate COOKIE ECHO match the Verification Tags of the * current association, consider the State Cookie valid even if * the lifespan is exceeded. */ new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, &err_chk_p); /* FIXME: * If the re-build failed, what is the proper error path * from here? * * [We should abort the association. --piggy] */ if (!new_asoc) { /* FIXME: Several errors are possible. A bad cookie should * be silently discarded, but think about logging it too. 
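 * Current behaviour: -SCTP_IERROR_NOMEM is treated as an allocation
 * failure, -SCTP_IERROR_STALE_COOKIE sends a stale-cookie ERROR before
 * the packet is discarded, and a bad signature (or any other error)
 * discards the packet silently.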
*/ switch (error) { case -SCTP_IERROR_NOMEM: goto nomem; case -SCTP_IERROR_STALE_COOKIE: sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, err_chk_p); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); case -SCTP_IERROR_BAD_SIG: default: return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } } /* Compare the tie_tag in cookie with the verification tag of * current association. */ action = sctp_tietags_compare(new_asoc, asoc); switch (action) { case 'A': /* Association restart. */ retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands, new_asoc); break; case 'B': /* Collision case B. */ retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands, new_asoc); break; case 'C': /* Collision case C. */ retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands, new_asoc); break; case 'D': /* Collision case D. */ retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands, new_asoc); break; default: /* Discard packet for all others. */ retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); break; } /* Delete the tempory new association. */ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); /* Restore association pointer to provide SCTP command interpeter * with a valid context in case it needs to manipulate * the queues */ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC((struct sctp_association *)asoc)); return retval; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Process an ABORT. (SHUTDOWN-PENDING state) * * See sctp_sf_do_9_1_abort(). */ sctp_disposition_t sctp_sf_shutdown_pending_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ABORT chunk has a valid length. * Since this is an ABORT chunk, we have to discard it * because of the following text: * RFC 2960, Section 3.3.7 * If an endpoint receives an ABORT with a format error or for an * association that doesn't exist, it MUST silently discard it. * Because the length is "invalid", we can't really discard just * as we do not know its true length. So, to be safe, discard the * packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks * F4) One special consideration is that ABORT Chunks arriving * destined to the IP address being deleted MUST be * ignored (see Section 5.3.1 for further details). */ if (SCTP_ADDR_DEL == sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } /* * Process an ABORT. (SHUTDOWN-SENT state) * * See sctp_sf_do_9_1_abort(). */ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ABORT chunk has a valid length. 
* Since this is an ABORT chunk, we have to discard it * because of the following text: * RFC 2960, Section 3.3.7 * If an endpoint receives an ABORT with a format error or for an * association that doesn't exist, it MUST silently discard it. * Because the length is "invalid", we can't really discard just * as we do not know its true length. So, to be safe, discard the * packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks * F4) One special consideration is that ABORT Chunks arriving * destined to the IP address being deleted MUST be * ignored (see Section 5.3.1 for further details). */ if (SCTP_ADDR_DEL == sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Stop the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } /* * Process an ABORT. (SHUTDOWN-ACK-SENT state) * * See sctp_sf_do_9_1_abort(). */ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* The same T2 timer, so we should be able to use * common function with the SHUTDOWN-SENT state. */ return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands); } /* * Handle an Error received in COOKIE_ECHOED state. * * Only handle the error type of stale COOKIE Error, the other errors will * be ignored. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_errhdr_t *err; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ERROR chunk has a valid length. * The parameter walking depends on this as well. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Process the error here */ /* FUTURE FIXME: When PR-SCTP related and other optional * parms are emitted, this will have to change to handle multiple * errors. */ sctp_walk_errors(err, chunk->chunk_hdr) { if (SCTP_ERROR_STALE_COOKIE == err->cause) return sctp_sf_do_5_2_6_stale(net, ep, asoc, type, arg, commands); } /* It is possible to have malformed error causes, and that * will cause us to end the walk early. However, since * we are discarding the packet, there should be no adverse * affects. */ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* * Handle a Stale COOKIE Error * * Section: 5.2.6 Handle Stale COOKIE Error * If the association is in the COOKIE-ECHOED state, the endpoint may elect * one of the following three alternatives. * ... * 3) Send a new INIT chunk to the endpoint, adding a Cookie * Preservative parameter requesting an extension to the lifetime of * the State Cookie. 
When calculating the time extension, an * implementation SHOULD use the RTT information measured based on the * previous COOKIE ECHO / ERROR exchange, and should add no more * than 1 second beyond the measured RTT, due to long State Cookie * lifetimes making the endpoint more subject to a replay attack. * * Verification Tag: Not explicit, but safe to ignore. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; time_t stale; sctp_cookie_preserve_param_t bht; sctp_errhdr_t *err; struct sctp_chunk *reply; struct sctp_bind_addr *bp; int attempts = asoc->init_err_counter + 1; if (attempts > asoc->max_init_attempts) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(SCTP_ERROR_STALE_COOKIE)); return SCTP_DISPOSITION_DELETE_TCB; } err = (sctp_errhdr_t *)(chunk->skb->data); /* When calculating the time extension, an implementation * SHOULD use the RTT information measured based on the * previous COOKIE ECHO / ERROR exchange, and should add no * more than 1 second beyond the measured RTT, due to long * State Cookie lifetimes making the endpoint more subject to * a replay attack. * Measure of Staleness's unit is usec. (1/1000000 sec) * Suggested Cookie Life-span Increment's unit is msec. * (1/1000 sec) * In general, if you use the suggested cookie life, the value * found in the field of measure of staleness should be doubled * to give ample time to retransmit the new cookie and thus * yield a higher probability of success on the reattempt. */ stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t))); stale = (stale * 2) / 1000; bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; bht.param_hdr.length = htons(sizeof(bht)); bht.lifespan_increment = htonl(stale); /* Build that new INIT chunk. */ bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht)); if (!reply) goto nomem; sctp_addto_chunk(reply, sizeof(bht), &bht); /* Clear peer's init_tag cached in assoc as we are sending a new INIT */ sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL()); /* Stop pending T3-rtx and heartbeat timers */ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); /* Delete non-primary peer ip addresses since we are transitioning * back to the COOKIE-WAIT state */ sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL()); /* If we've sent any data bundled with COOKIE-ECHO we will need to * resend */ sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN, SCTP_TRANSPORT(asoc->peer.primary_path)); /* Cast away the const modifier, as we want to just * rerun it through as a sideffect. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_WAIT)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Process an ABORT. 
* * Section: 9.1 * After checking the Verification Tag, the receiving endpoint shall * remove the association from its record, and shall report the * termination to its upper layer. * * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules * B) Rules for packet carrying ABORT: * * - The endpoint shall always fill in the Verification Tag field of the * outbound packet with the destination endpoint's tag value if it * is known. * * - If the ABORT is sent in response to an OOTB packet, the endpoint * MUST follow the procedure described in Section 8.4. * * - The receiver MUST accept the packet if the Verification Tag * matches either its own tag, OR the tag of its peer. Otherwise, the * receiver MUST silently discard the packet and take no further * action. * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ABORT chunk has a valid length. * Since this is an ABORT chunk, we have to discard it * because of the following text: * RFC 2960, Section 3.3.7 * If an endpoint receives an ABORT with a format error or for an * association that doesn't exist, it MUST silently discard it. * Because the length is "invalid", we can't really discard just * as we do not know its true length. So, to be safe, discard the * packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks * F4) One special consideration is that ABORT Chunks arriving * destined to the IP address being deleted MUST be * ignored (see Section 5.3.1 for further details). */ if (SCTP_ADDR_DEL == sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; /* See if we have an error cause code in the chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) { sctp_errhdr_t *err; sctp_walk_errors(err, chunk->chunk_hdr); if ((void *)err != (void *)chunk->chunk_end) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); error = ((sctp_errhdr_t *)chunk->skb->data)->cause; } sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); /* ASSOC_FAILED will DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } /* * Process an ABORT. (COOKIE-WAIT state) * * See sctp_sf_do_9_1_abort() above. 
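 * Unlike the established-state handler (which reports ECONNRESET), this
 * variant reports ECONNREFUSED since the association never came up, and
 * it goes through sctp_stop_t1_and_abort(), which stops the T1-init
 * timer and fails the INIT with whatever error cause the ABORT carried.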
*/ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; unsigned int len; __be16 error = SCTP_ERROR_NO_ERROR; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ABORT chunk has a valid length. * Since this is an ABORT chunk, we have to discard it * because of the following text: * RFC 2960, Section 3.3.7 * If an endpoint receives an ABORT with a format error or for an * association that doesn't exist, it MUST silently discard it. * Because the length is "invalid", we can't really discard just * as we do not know its true length. So, to be safe, discard the * packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* See if we have an error cause code in the chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) error = ((sctp_errhdr_t *)chunk->skb->data)->cause; return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc, chunk->transport); } /* * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state) */ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR, ENOPROTOOPT, asoc, (struct sctp_transport *)arg); } /* * Process an ABORT. (COOKIE-ECHOED state) */ sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. */ return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands); } /* * Stop T1 timer and abort association with "INIT failed". * * This is common code called by several sctp_sf_*_abort() functions above. */ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, sctp_cmd_seq_t *commands, __be16 error, int sk_err, const struct sctp_association *asoc, struct sctp_transport *transport) { pr_debug("%s: ABORT received (INIT)\n", __func__); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); /* CMD_INIT_FAILED will DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(error)); return SCTP_DISPOSITION_ABORT; } /* * sctp_sf_do_9_2_shut * * Section: 9.2 * Upon the reception of the SHUTDOWN, the peer endpoint shall * - enter the SHUTDOWN-RECEIVED state, * * - stop accepting new data from its SCTP user * * - verify, by checking the Cumulative TSN Ack field of the chunk, * that all its outstanding DATA chunks have been received by the * SHUTDOWN sender. * * Once an endpoint as reached the SHUTDOWN-RECEIVED state it MUST NOT * send a SHUTDOWN in response to a ULP request. And should discard * subsequent SHUTDOWN chunks. 
* * If there are still outstanding DATA chunks left, the SHUTDOWN * receiver shall continue to follow normal data transmission * procedures defined in Section 6 until all outstanding DATA chunks * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept * new data from its SCTP user. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_shutdownhdr_t *sdh; sctp_disposition_t disposition; struct sctp_ulpevent *ev; __u32 ctsn; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Convert the elaborate header. */ sdh = (sctp_shutdownhdr_t *)chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); chunk->subh.shutdown_hdr = sdh; ctsn = ntohl(sdh->cum_tsn_ack); if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, asoc->ctsn_ack_point); return SCTP_DISPOSITION_DISCARD; } /* If Cumulative TSN Ack beyond the max tsn currently * send, terminating the association and respond to the * sender with an ABORT. */ if (!TSN_lt(ctsn, asoc->next_tsn)) return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT * When a peer sends a SHUTDOWN, SCTP delivers this notification to * inform the application that it should cease sending data. */ ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC); if (!ev) { disposition = SCTP_DISPOSITION_NOMEM; goto out; } sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); /* Upon the reception of the SHUTDOWN, the peer endpoint shall * - enter the SHUTDOWN-RECEIVED state, * - stop accepting new data from its SCTP user * * [This is implicit in the new state.] */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED)); disposition = SCTP_DISPOSITION_CONSUME; if (sctp_outq_is_empty(&asoc->outqueue)) { disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type, arg, commands); } if (SCTP_DISPOSITION_NOMEM == disposition) goto out; /* - verify, by checking the Cumulative TSN Ack field of the * chunk, that all its outstanding DATA chunks have been * received by the SHUTDOWN sender. */ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack)); out: return disposition; } /* * sctp_sf_do_9_2_shut_ctsn * * Once an endpoint has reached the SHUTDOWN-RECEIVED state, * it MUST NOT send a SHUTDOWN in response to a ULP request. * The Cumulative TSN Ack of the received SHUTDOWN chunk * MUST be processed. */ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_shutdownhdr_t *sdh; __u32 ctsn; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN chunk has a valid length. 
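 * The smallest well-formed SHUTDOWN is sizeof(struct sctp_shutdown_chunk_t),
 * i.e. the 4-byte chunk header plus the 4-byte Cumulative TSN Ack field.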
*/ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); sdh = (sctp_shutdownhdr_t *)chunk->skb->data; ctsn = ntohl(sdh->cum_tsn_ack); if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, asoc->ctsn_ack_point); return SCTP_DISPOSITION_DISCARD; } /* If Cumulative TSN Ack beyond the max tsn currently * send, terminating the association and respond to the * sender with an ABORT. */ if (!TSN_lt(ctsn, asoc->next_tsn)) return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); /* verify, by checking the Cumulative TSN Ack field of the * chunk, that all its outstanding DATA chunks have been * received by the SHUTDOWN sender. */ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, SCTP_BE32(sdh->cum_tsn_ack)); return SCTP_DISPOSITION_CONSUME; } /* RFC 2960 9.2 * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination * transport addresses (either in the IP addresses or in the INIT chunk) * that belong to this association, it should discard the INIT chunk and * retransmit the SHUTDOWN ACK chunk. */ sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = (struct sctp_chunk *) arg; struct sctp_chunk *reply; /* Make sure that the chunk has a valid length */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Since we are not going to really process this INIT, there * is no point in verifying chunk boundries. Just generate * the SHUTDOWN ACK. */ reply = sctp_make_shutdown_ack(asoc, chunk); if (NULL == reply) goto nomem; /* Set the transport for the SHUTDOWN ACK chunk and the timeout for * the T2-SHUTDOWN timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); /* and restart the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * sctp_sf_do_ecn_cwr * * Section: Appendix A: Explicit Congestion Notification * * CWR: * * RFC 2481 details a specific bit for a sender to send in the header of * its next outbound TCP segment to indicate to its peer that it has * reduced its congestion window. This is termed the CWR bit. For * SCTP the same indication is made by including the CWR chunk. * This chunk contains one data element, i.e. the TSN number that * was sent in the ECNE chunk. This element represents the lowest * TSN number in the datagram that was originally marked with the * CE bit. * * Verification Tag: 8.5 Verification Tag [Normal verification] * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. 
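 *
 * Implementation note: the CWR is only honoured (ECNE transmission
 * stopped) when its lowest TSN covers the last ECNE we sent, i.e. when
 * TSN_lte(asoc->last_ecne_tsn, lowest_tsn); older CWRs are ignored.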
*/ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_cwrhdr_t *cwr; struct sctp_chunk *chunk = arg; u32 lowest_tsn; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); cwr = (sctp_cwrhdr_t *) chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); lowest_tsn = ntohl(cwr->lowest_tsn); /* Does this CWR ack the last sent congestion notification? */ if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) { /* Stop sending ECNE. */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CWR, SCTP_U32(lowest_tsn)); } return SCTP_DISPOSITION_CONSUME; } /* * sctp_sf_do_ecne * * Section: Appendix A: Explicit Congestion Notification * * ECN-Echo * * RFC 2481 details a specific bit for a receiver to send back in its * TCP acknowledgements to notify the sender of the Congestion * Experienced (CE) bit having arrived from the network. For SCTP this * same indication is made by including the ECNE chunk. This chunk * contains one data element, i.e. the lowest TSN associated with the IP * datagram marked with the CE bit..... * * Verification Tag: 8.5 Verification Tag [Normal verification] * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_ecne(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_ecnehdr_t *ecne; struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); ecne = (sctp_ecnehdr_t *) chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t)); /* If this is a newer ECNE than the last CWR packet we sent out */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, SCTP_U32(ntohl(ecne->lowest_tsn))); return SCTP_DISPOSITION_CONSUME; } /* * Section: 6.2 Acknowledgement on Reception of DATA Chunks * * The SCTP endpoint MUST always acknowledge the reception of each valid * DATA chunk. * * The guidelines on delayed acknowledgement algorithm specified in * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an * acknowledgement SHOULD be generated for at least every second packet * (not every second DATA chunk) received, and SHOULD be generated within * 200 ms of the arrival of any unacknowledged DATA chunk. In some * situations it may be beneficial for an SCTP transmitter to be more * conservative than the algorithms detailed in this document allow. * However, an SCTP transmitter MUST NOT be more aggressive than the * following algorithms allow. * * A SCTP receiver MUST NOT generate more than one SACK for every * incoming packet, other than to update the offered window as the * receiving application consumes new data. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. 
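 *
 * Handling of the sctp_eat_data() result below, in brief:
 *   NO_ERROR              - accept; SACK per the rules above
 *   HIGH_TSN, BAD_STREAM  - count a discard, SACK without forcing
 *   DUP_TSN, IGNORE_TSN   - count a discard, force an immediate SACK
 *   NO_DATA               - nothing left to do, consume the chunk
 *   PROTO_VIOLATION       - abort the association with a violation error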
*/ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_arg_t force = SCTP_NOFORCE(); int error; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); error = sctp_eat_data(asoc, chunk, commands); switch (error) { case SCTP_IERROR_NO_ERROR: break; case SCTP_IERROR_HIGH_TSN: case SCTP_IERROR_BAD_STREAM: SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS); goto discard_noforce; case SCTP_IERROR_DUP_TSN: case SCTP_IERROR_IGNORE_TSN: SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS); goto discard_force; case SCTP_IERROR_NO_DATA: goto consume; case SCTP_IERROR_PROTO_VIOLATION: return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); default: BUG(); } if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) force = SCTP_FORCE(); if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); } /* If this is the last chunk in a packet, we need to count it * toward sack generation. Note that we need to SACK every * OTHER packet containing data chunks, EVEN IF WE DISCARD * THEM. We elect to NOT generate SACK's if the chunk fails * the verification tag test. * * RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks * * The SCTP endpoint MUST always acknowledge the reception of * each valid DATA chunk. * * The guidelines on delayed acknowledgement algorithm * specified in Section 4.2 of [RFC2581] SHOULD be followed. * Specifically, an acknowledgement SHOULD be generated for at * least every second packet (not every second DATA chunk) * received, and SHOULD be generated within 200 ms of the * arrival of any unacknowledged DATA chunk. In some * situations it may be beneficial for an SCTP transmitter to * be more conservative than the algorithms detailed in this * document allow. However, an SCTP transmitter MUST NOT be * more aggressive than the following algorithms allow. */ if (chunk->end_of_packet) sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force); return SCTP_DISPOSITION_CONSUME; discard_force: /* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks * * When a packet arrives with duplicate DATA chunk(s) and with * no new DATA chunk(s), the endpoint MUST immediately send a * SACK with no delay. If a packet arrives with duplicate * DATA chunk(s) bundled with new DATA chunks, the endpoint * MAY immediately send a SACK. Normally receipt of duplicate * DATA chunks will occur when the original SACK chunk was lost * and the peer's RTO has expired. The duplicate TSN number(s) * SHOULD be reported in the SACK as duplicate. */ /* In our case, we split the MAY SACK advice up whether or not * the last chunk is a duplicate.' 
*/ if (chunk->end_of_packet) sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); return SCTP_DISPOSITION_DISCARD; discard_noforce: if (chunk->end_of_packet) sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force); return SCTP_DISPOSITION_DISCARD; consume: return SCTP_DISPOSITION_CONSUME; } /* * sctp_sf_eat_data_fast_4_4 * * Section: 4 (4) * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received * DATA chunks without delay. * * Verification Tag: 8.5 Verification Tag [Normal verification] * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; int error; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); error = sctp_eat_data(asoc, chunk, commands); switch (error) { case SCTP_IERROR_NO_ERROR: case SCTP_IERROR_HIGH_TSN: case SCTP_IERROR_DUP_TSN: case SCTP_IERROR_IGNORE_TSN: case SCTP_IERROR_BAD_STREAM: break; case SCTP_IERROR_NO_DATA: goto consume; case SCTP_IERROR_PROTO_VIOLATION: return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); default: BUG(); } /* Go a head and force a SACK, since we are shutting down. */ /* Implementor's Guide. * * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately * respond to each received packet containing one or more DATA chunk(s) * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer */ if (chunk->end_of_packet) { /* We must delay the chunk creation since the cumulative * TSN has not been updated yet. */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); } consume: return SCTP_DISPOSITION_CONSUME; } /* * Section: 6.2 Processing a Received SACK * D) Any time a SACK arrives, the endpoint performs the following: * * i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point, * then drop the SACK. Since Cumulative TSN Ack is monotonically * increasing, a SACK whose Cumulative TSN Ack is less than the * Cumulative TSN Ack Point indicates an out-of-order SACK. * * ii) Set rwnd equal to the newly received a_rwnd minus the number * of bytes still outstanding after processing the Cumulative TSN Ack * and the Gap Ack Blocks. * * iii) If the SACK is missing a TSN that was previously * acknowledged via a Gap Ack Block (e.g., the data receiver * reneged on the data), then mark the corresponding DATA chunk * as available for retransmit: Mark it as missing for fast * retransmit as described in Section 7.2.4 and if no retransmit * timer is running for the destination address to which the DATA * chunk was originally transmitted, then T3-rtx is started for * that destination address. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. 
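 *
 * [Editor's illustrative aside, not part of the original source: the
 * handler below accepts the Cumulative TSN Ack only when it lies in the
 * window [ctsn_ack_point, next_tsn): older values are dropped, and values
 * at or beyond next_tsn were never sent and are treated as a violation.
 * The stand-alone sketch below just models that window test; the names
 * are hypothetical.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stdio.h>

enum ctsn_verdict { CTSN_OK, CTSN_STALE, CTSN_BEYOND_MAX };

/* Serial-number "less than" for 32-bit TSNs (tolerates wrap-around). */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Mirror of the two guards in the SACK handler: a ctsn older than the
 * ack point is dropped, a ctsn at or beyond next_tsn is a violation.
 */
static enum ctsn_verdict check_cum_tsn_ack(uint32_t ctsn,
					   uint32_t ctsn_ack_point,
					   uint32_t next_tsn)
{
	if (tsn_lt(ctsn, ctsn_ack_point))
		return CTSN_STALE;
	if (!tsn_lt(ctsn, next_tsn))
		return CTSN_BEYOND_MAX;
	return CTSN_OK;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_cum_tsn_ack(100, 90, 200),	/* CTSN_OK         */
	       check_cum_tsn_ack(80, 90, 200),	/* CTSN_STALE      */
	       check_cum_tsn_ack(200, 90, 200));/* CTSN_BEYOND_MAX */
	return 0;
}
#endif
/*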
*/ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_sackhdr_t *sackh; __u32 ctsn; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Pull the SACK chunk from the data buffer */ sackh = sctp_sm_pull_sack(chunk); /* Was this a bogus SACK? */ if (!sackh) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); chunk->subh.sack_hdr = sackh; ctsn = ntohl(sackh->cum_tsn_ack); /* i) If Cumulative TSN Ack is less than the Cumulative TSN * Ack Point, then drop the SACK. Since Cumulative TSN * Ack is monotonically increasing, a SACK whose * Cumulative TSN Ack is less than the Cumulative TSN Ack * Point indicates an out-of-order SACK. */ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, asoc->ctsn_ack_point); return SCTP_DISPOSITION_DISCARD; } /* If Cumulative TSN Ack beyond the max tsn currently * send, terminating the association and respond to the * sender with an ABORT. */ if (!TSN_lt(ctsn, asoc->next_tsn)) return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); /* Return this SACK for further processing. */ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); /* Note: We do the rest of the work on the PROCESS_SACK * sideeffect. */ return SCTP_DISPOSITION_CONSUME; } /* * Generate an ABORT in response to a packet. * * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41 * * 8) The receiver should respond to the sender of the OOTB packet with * an ABORT. When sending the ABORT, the receiver of the OOTB packet * MUST fill in the Verification Tag field of the outbound packet * with the value found in the Verification Tag field of the OOTB * packet and set the T-bit in the Chunk Flags to indicate that the * Verification Tag is reflected. After sending this ABORT, the * receiver of the OOTB packet shall discard the OOTB packet and take * no further action. * * Verification Tag: * * The return value is the disposition of the chunk. */ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; struct sctp_chunk *abort; packet = sctp_ootb_pkt_new(net, asoc, chunk); if (packet) { /* Make an ABORT. The T bit will be set if the asoc * is NULL. */ abort = sctp_make_abort(asoc, chunk, 0); if (!abort) { sctp_ootb_pkt_free(packet); return SCTP_DISPOSITION_NOMEM; } /* Reflect vtag if T-Bit is set */ if (sctp_test_T_bit(abort)) packet->vtag = ntohl(chunk->sctp_hdr->vtag); /* Set the skb to the belonging sock for accounting. */ abort->skb->sk = ep->base.sk; sctp_packet_append_chunk(packet, abort); sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); return SCTP_DISPOSITION_CONSUME; } return SCTP_DISPOSITION_NOMEM; } /* * Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR * event as ULP notification for each cause included in the chunk. 
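 *
 * [Editor's illustrative aside, not part of the original source: the
 * handler below walks every error cause carried in the ERROR chunk and
 * rejects the chunk when the causes do not end exactly at the chunk
 * boundary.  The stand-alone sketch models that TLV walk; the struct and
 * function names are hypothetical.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct err_cause_hdr {
	uint16_t cause;		/* cause code, network byte order */
	uint16_t length;	/* length incl. this 4-byte header */
};

/* Walk the error causes in [buf, buf + len): each cause is a TLV whose
 * length covers the header plus data, and the next cause starts on a
 * 4-byte boundary.  Returns 0 when the walk ends cleanly at the buffer
 * end, -1 on a malformed cause -- the same shape of check the handler
 * below applies before queueing SCTP_REMOTE_ERROR notifications.
 */
static int walk_error_causes(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct err_cause_hdr) <= len) {
		struct err_cause_hdr h;
		size_t clen;

		memcpy(&h, buf + off, sizeof(h));
		clen = ntohs(h.length);
		if (clen < sizeof(h) || off + clen > len)
			return -1;
		off += (clen + 3) & ~(size_t)3;	/* advance past padding */
	}
	return off >= len ? 0 : -1;
}

int main(void)
{
	/* one example cause: code 3, length 8 (header + 4 bytes of data) */
	const uint8_t body[8] = { 0x00, 0x03, 0x00, 0x08, 0xde, 0xad, 0xbe, 0xef };

	return walk_error_causes(body, sizeof(body));
}
#endif
/*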
* * API 5.3.1.3 - SCTP_REMOTE_ERROR * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_operr_notify(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_errhdr_t *err; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ERROR chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); sctp_walk_errors(err, chunk->chunk_hdr); if ((void *)err != (void *)chunk->chunk_end) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err, commands); sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, SCTP_CHUNK(chunk)); return SCTP_DISPOSITION_CONSUME; } /* * Process an inbound SHUTDOWN ACK. * * From Section 9.2: * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its * peer, and remove all record of the association. * * The return value is the disposition. */ sctp_disposition_t sctp_sf_do_9_2_final(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; struct sctp_ulpevent *ev; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* 10.2 H) SHUTDOWN COMPLETE notification * * When SCTP completes the shutdown procedures (section 9.2) this * notification is passed to the upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, 0, 0, 0, NULL, GFP_ATOMIC); if (!ev) goto nomem; /* ...send a SHUTDOWN COMPLETE chunk to its peer, */ reply = sctp_make_shutdown_complete(asoc, chunk); if (!reply) goto nomem_chunk; /* Do all the commands now (after allocation), so that we * have consistent state if memory allocation failes */ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); /* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall * stop the T2-shutdown timer, */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); /* ...and remove all record of the association. */ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; nomem_chunk: sctp_ulpevent_free(ev); nomem: return SCTP_DISPOSITION_NOMEM; } /* * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41. * * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. 
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB * packet must fill in the Verification Tag field of the outbound * packet with the Verification Tag received in the SHUTDOWN ACK and * set the T-bit in the Chunk Flags to indicate that the Verification * Tag is reflected. * * 8) The receiver should respond to the sender of the OOTB packet with * an ABORT. When sending the ABORT, the receiver of the OOTB packet * MUST fill in the Verification Tag field of the outbound packet * with the value found in the Verification Tag field of the OOTB * packet and set the T-bit in the Chunk Flags to indicate that the * Verification Tag is reflected. After sending this ABORT, the * receiver of the OOTB packet shall discard the OOTB packet and take * no further action. */ sctp_disposition_t sctp_sf_ootb(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; sctp_chunkhdr_t *ch; sctp_errhdr_t *err; __u8 *ch_end; int ootb_shut_ack = 0; int ootb_cookie_ack = 0; SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; do { /* Report violation if the chunk is less then minimal */ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Now that we know we at least have a chunk header, * do things that are type appropriate. */ if (SCTP_CID_SHUTDOWN_ACK == ch->type) ootb_shut_ack = 1; /* RFC 2960, Section 3.3.7 * Moreover, under any circumstances, an endpoint that * receives an ABORT MUST NOT respond to that ABORT by * sending an ABORT of its own. */ if (SCTP_CID_ABORT == ch->type) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR * or a COOKIE ACK the SCTP Packet should be silently * discarded. */ if (SCTP_CID_COOKIE_ACK == ch->type) ootb_cookie_ack = 1; if (SCTP_CID_ERROR == ch->type) { sctp_walk_errors(err, ch) { if (SCTP_ERROR_STALE_COOKIE == err->cause) { ootb_cookie_ack = 1; break; } } } /* Report violation if chunk len overflows */ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); if (ch_end > skb_tail_pointer(skb)) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); ch = (sctp_chunkhdr_t *) ch_end; } while (ch_end < skb_tail_pointer(skb)); if (ootb_shut_ack) return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands); else if (ootb_cookie_ack) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); else return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); } /* * Handle an "Out of the blue" SHUTDOWN ACK. * * Section: 8.4 5, sctpimpguide 2.41. * * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB * packet must fill in the Verification Tag field of the outbound * packet with the Verification Tag received in the SHUTDOWN ACK and * set the T-bit in the Chunk Flags to indicate that the Verification * Tag is reflected. * * Inputs * (endpoint, asoc, type, arg, commands) * * Outputs * (sctp_disposition_t) * * The return value is the disposition of the chunk. 
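 *
 * [Editor's illustrative aside, not part of the original source: for
 * out-of-the-blue replies (the ABORT of 8.4/8 and the SHUTDOWN COMPLETE
 * of 8.4/5) there is no association to supply a verification tag, so the
 * tag from the offending packet is reflected and the T bit is set.  The
 * stand-alone sketch below only models that choice; the names are
 * hypothetical.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_FLAG_T	0x01	/* "verification tag is reflected" */

struct reply_hdr {
	uint32_t vtag;		/* verification tag to place in the reply */
	uint8_t  chunk_flags;	/* T bit set when the tag is reflected    */
};

/* Choose the reply's verification tag: with no matching association the
 * only usable tag is the one found in the offending packet, so reflect it
 * and set the T bit; with an association, use the peer's tag as usual.
 */
static struct reply_hdr make_ootb_reply(uint32_t pkt_vtag,
					int have_assoc, uint32_t peer_vtag)
{
	struct reply_hdr r = { 0, 0 };

	if (have_assoc) {
		r.vtag = peer_vtag;
	} else {
		r.vtag = pkt_vtag;
		r.chunk_flags |= CHUNK_FLAG_T;
	}
	return r;
}

int main(void)
{
	struct reply_hdr r = make_ootb_reply(0x12345678u, 0, 0);

	printf("vtag=%#x T=%d\n", r.vtag, !!(r.chunk_flags & CHUNK_FLAG_T));
	return 0;
}
#endif
/*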
*/ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; struct sctp_chunk *shut; packet = sctp_ootb_pkt_new(net, asoc, chunk); if (packet) { /* Make an SHUTDOWN_COMPLETE. * The T bit will be set if the asoc is NULL. */ shut = sctp_make_shutdown_complete(asoc, chunk); if (!shut) { sctp_ootb_pkt_free(packet); return SCTP_DISPOSITION_NOMEM; } /* Reflect vtag if T-Bit is set */ if (sctp_test_T_bit(shut)) packet->vtag = ntohl(chunk->sctp_hdr->vtag); /* Set the skb to the belonging sock for accounting. */ shut->skb->sk = ep->base.sk; sctp_packet_append_chunk(packet, shut); sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); /* If the chunk length is invalid, we don't want to process * the reset of the packet. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* We need to discard the rest of the packet to prevent * potential bomming attacks from additional bundled chunks. * This is documented in SCTP Threats ID. */ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } return SCTP_DISPOSITION_NOMEM; } /* * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state. * * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK * If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the * procedures in section 8.4 SHOULD be followed, in other words it * should be treated as an Out Of The Blue packet. * [This means that we do NOT check the Verification Tag on these * chunks. --piggy ] * */ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Although we do have an association in this case, it corresponds * to a restarted association. So the packet is treated as an OOTB * packet and the state function that handles OOTB SHUTDOWN_ACK is * called with a NULL association. */ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands); } /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */ sctp_disposition_t sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; __u32 serial; int length; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* ADD-IP: Section 4.1.1 * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. 
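 *
 * [Editor's illustrative aside, not part of the original source: the
 * serial-number handling just below implements ADDIP 5.2 E1/E2/E4/E5 --
 * an ASCONF whose serial equals 'Peer-Serial-Number' + 1 is processed and
 * its ASCONF-ACK cached, an older serial is answered from that cache, and
 * anything newer is discarded.  The stand-alone sketch models only the
 * decision; the names are hypothetical.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stdio.h>

enum asconf_action { ASCONF_PROCESS, ASCONF_REPLY_FROM_CACHE, ASCONF_DISCARD };

/* ADDIP 5.2 E1/E2/E4/E5 as a pure decision: the expected serial is
 * peer_serial + 1; older serials are answered from the cached ASCONF-ACKs
 * (if any), anything newer is treated as stale or forged and dropped.
 */
static enum asconf_action classify_asconf(uint32_t rcvd_serial,
					  uint32_t peer_serial)
{
	uint32_t expected = peer_serial + 1;

	if (rcvd_serial == expected)
		return ASCONF_PROCESS;
	if ((int32_t)(rcvd_serial - expected) < 0)	/* serial "less than" */
		return ASCONF_REPLY_FROM_CACHE;
	return ASCONF_DISCARD;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_asconf(11, 10),	/* process          */
	       classify_asconf(9, 10),	/* reply from cache */
	       classify_asconf(15, 10));/* discard          */
	return 0;
}
#endif
/*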
*/ if (!net->sctp.addip_noauth && !chunk->auth) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the ASCONF ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); addr_param = (union sctp_addr_param *)hdr->params; length = ntohs(addr_param->p.length); if (length < sizeof(sctp_paramhdr_t)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)addr_param, commands); /* Verify the ASCONF chunk before processing it. */ if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)((void *)addr_param + length), (void *)chunk->chunk_end, &err_param)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err_param, commands); /* ADDIP 5.2 E1) Compare the value of the serial number to the value * the endpoint stored in a new association variable * 'Peer-Serial-Number'. */ if (serial == asoc->peer.addip_serial + 1) { /* If this is the first instance of ASCONF in the packet, * we can clean our old ASCONF-ACKs. */ if (!chunk->has_asconf) sctp_assoc_clean_asconf_ack_cache(asoc); /* ADDIP 5.2 E4) When the Sequence Number matches the next one * expected, process the ASCONF as described below and after * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to * the response packet and cache a copy of it (in the event it * later needs to be retransmitted). * * Essentially, do V1-V5. */ asconf_ack = sctp_process_asconf((struct sctp_association *) asoc, chunk); if (!asconf_ack) return SCTP_DISPOSITION_NOMEM; } else if (serial < asoc->peer.addip_serial + 1) { /* ADDIP 5.2 E2) * If the value found in the Sequence Number is less than the * ('Peer- Sequence-Number' + 1), simply skip to the next * ASCONF, and include in the outbound response packet * any previously cached ASCONF-ACK response that was * sent and saved that matches the Sequence Number of the * ASCONF. Note: It is possible that no cached ASCONF-ACK * Chunk exists. This will occur when an older ASCONF * arrives out of order. In such a case, the receiver * should skip the ASCONF Chunk and not include ASCONF-ACK * Chunk for that chunk. */ asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); if (!asconf_ack) return SCTP_DISPOSITION_DISCARD; /* Reset the transport so that we select the correct one * this time around. This is to make sure that we don't * accidentally use a stale transport that's been removed. */ asconf_ack->transport = NULL; } else { /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since * it must be either a stale packet or from an attacker. */ return SCTP_DISPOSITION_DISCARD; } /* ADDIP 5.2 E6) The destination address of the SCTP packet * containing the ASCONF-ACK Chunks MUST be the source address of * the SCTP packet that held the ASCONF Chunks. * * To do this properly, we'll set the destination address of the chunk * and at the transmit time, will try look up the transport to use. * Since ASCONFs may be bundled, the correct transport may not be * created until we process the entire packet, thus this workaround. 
*/ asconf_ack->dest = chunk->source; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); if (asoc->new_transport) { sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands); ((struct sctp_association *)asoc)->new_transport = NULL; } return SCTP_DISPOSITION_CONSUME; } /* * ADDIP Section 4.3 General rules for address manipulation * When building TLV parameters for the ASCONF Chunk that will add or * delete IP addresses the D0 to D13 rules should be applied: */ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *asconf_ack = arg; struct sctp_chunk *last_asconf = asoc->addip_last_asconf; struct sctp_chunk *abort; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *addip_hdr; __u32 sent_serial, rcvd_serial; if (!sctp_vtag_verify(asconf_ack, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* ADD-IP, Section 4.1.2: * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !asconf_ack->auth) return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; rcvd_serial = ntohl(addip_hdr->serial); /* Verify the ASCONF-ACK chunk before processing it. */ if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)addip_hdr->params, (void *)asconf_ack->chunk_end, &err_param)) return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, (void *)err_param, commands); if (last_asconf) { addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; sent_serial = ntohl(addip_hdr->serial); } else { sent_serial = asoc->addip_serial - 1; } /* D0) If an endpoint receives an ASCONF-ACK that is greater than or * equal to the next serial number to be used but no ASCONF chunk is * outstanding the endpoint MUST ABORT the association. Note that a * sequence number is greater than if it is no more than 2^^31-1 * larger than the current sequence number (using serial arithmetic). */ if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) && !(asoc->addip_last_asconf)) { abort = sctp_make_abort(asoc, asconf_ack, sizeof(sctp_errhdr_t)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } /* We are going to ABORT, so we might as well stop * processing the rest of the chunks in the packet. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); if (!sctp_process_asconf_ack((struct sctp_association *)asoc, asconf_ack)) { /* Successfully processed ASCONF_ACK. We can * release the next asconf if we have one. */ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, SCTP_NULL()); return SCTP_DISPOSITION_CONSUME; } abort = sctp_make_abort(asoc, asconf_ack, sizeof(sctp_errhdr_t)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } /* We are going to ABORT, so we might as well stop * processing the rest of the chunks in the packet. */ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } return SCTP_DISPOSITION_DISCARD; } /* * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP * * When a FORWARD TSN chunk arrives, the data receiver MUST first update * its cumulative TSN point to the value carried in the FORWARD TSN * chunk, and then MUST further advance its cumulative TSN point locally * if possible. * After the above processing, the data receiver MUST stop reporting any * missing TSNs earlier than or equal to the new cumulative TSN point. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_fwdtsn_skip *skip; __u16 len; __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* Make sure that the FORWARD_TSN chunk has valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk->subh.fwdtsn_hdr = fwdtsn_hdr; len = ntohs(chunk->chunk_hdr->length); len -= sizeof(struct sctp_chunkhdr); skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. 
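 *
 * [Editor's illustrative aside, not part of the original source: after
 * the new cumulative TSN is checked against the receive map just below,
 * every skipped (stream, SSN) pair must name a stream we actually
 * advertised, otherwise the FORWARD TSN is silently discarded.  The
 * stand-alone sketch models that skip-list check; the names are
 * hypothetical.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct fwdtsn_skip_entry {
	uint16_t stream;	/* stream to skip in, network byte order */
	uint16_t ssn;		/* stream sequence number to skip up to  */
};

/* Return 1 when every skip entry names a valid inbound stream
 * (stream id < max_instreams), 0 otherwise.
 */
static int fwdtsn_skips_valid(const struct fwdtsn_skip_entry *skip,
			      size_t nr, uint16_t max_instreams)
{
	size_t i;

	for (i = 0; i < nr; i++)
		if (ntohs(skip[i].stream) >= max_instreams)
			return 0;
	return 1;
}

int main(void)
{
	struct fwdtsn_skip_entry s[2] = {
		{ htons(0), htons(7) },
		{ htons(5), htons(2) },
	};

	return fwdtsn_skips_valid(s, 2, 10) ? 0 : 1;
}
#endif
/*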
*/ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) goto discard_noforce; /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) goto discard_noforce; } sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); if (len > sizeof(struct sctp_fwdtsn_hdr)) sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, SCTP_CHUNK(chunk)); /* Count this as receiving DATA. */ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); } /* FIXME: For now send a SACK, but DATA processing may * send another. */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); return SCTP_DISPOSITION_CONSUME; discard_noforce: return SCTP_DISPOSITION_DISCARD; } sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_fwdtsn_skip *skip; __u16 len; __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* Make sure that the FORWARD_TSN chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; chunk->subh.fwdtsn_hdr = fwdtsn_hdr; len = ntohs(chunk->chunk_hdr->length); len -= sizeof(struct sctp_chunkhdr); skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. */ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) goto gen_shutdown; /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) goto gen_shutdown; } sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); if (len > sizeof(struct sctp_fwdtsn_hdr)) sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, SCTP_CHUNK(chunk)); /* Go a head and force a SACK, since we are shutting down. */ gen_shutdown: /* Implementor's Guide. * * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately * respond to each received packet containing one or more DATA chunk(s) * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer */ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); return SCTP_DISPOSITION_CONSUME; } /* * SCTP-AUTH Section 6.3 Receiving authenticated chukns * * The receiver MUST use the HMAC algorithm indicated in the HMAC * Identifier field. If this algorithm was not specified by the * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk * during association setup, the AUTH chunk and all chunks after it MUST * be discarded and an ERROR chunk SHOULD be sent with the error cause * defined in Section 4.1. * * If an endpoint with no shared key receives a Shared Key Identifier * other than 0, it MUST silently discard all authenticated chunks. 
If * the endpoint has at least one endpoint pair shared key for the peer, * it MUST use the key specified by the Shared Key Identifier if a * key has been configured for that Shared Key Identifier. If no * endpoint pair shared key has been configured for that Shared Key * Identifier, all authenticated chunks MUST be silently discarded. * * Verification Tag: 8.5 Verification Tag [Normal verification] * * The return value is the disposition of the chunk. */ static sctp_ierror_t sctp_sf_authenticate(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, struct sctp_chunk *chunk) { struct sctp_authhdr *auth_hdr; struct sctp_hmac *hmac; unsigned int sig_len; __u16 key_id; __u8 *save_digest; __u8 *digest; /* Pull in the auth header, so we can do some more verification */ auth_hdr = (struct sctp_authhdr *)chunk->skb->data; chunk->subh.auth_hdr = auth_hdr; skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); /* Make sure that we support the HMAC algorithm from the auth * chunk. */ if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id)) return SCTP_IERROR_AUTH_BAD_HMAC; /* Make sure that the provided shared key identifier has been * configured */ key_id = ntohs(auth_hdr->shkey_id); if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id)) return SCTP_IERROR_AUTH_BAD_KEYID; /* Make sure that the length of the signature matches what * we expect. */ sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t); hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id)); if (sig_len != hmac->hmac_len) return SCTP_IERROR_PROTO_VIOLATION; /* Now that we've done validation checks, we can compute and * verify the hmac. The steps involved are: * 1. Save the digest from the chunk. * 2. Zero out the digest in the chunk. * 3. Compute the new digest * 4. Compare saved and new digests. */ digest = auth_hdr->hmac; skb_pull(chunk->skb, sig_len); save_digest = kmemdup(digest, sig_len, GFP_ATOMIC); if (!save_digest) goto nomem; memset(digest, 0, sig_len); sctp_auth_calculate_hmac(asoc, chunk->skb, (struct sctp_auth_chunk *)chunk->chunk_hdr, GFP_ATOMIC); /* Discard the packet if the digests do not match */ if (memcmp(save_digest, digest, sig_len)) { kfree(save_digest); return SCTP_IERROR_BAD_SIG; } kfree(save_digest); chunk->auth = 1; return SCTP_IERROR_NO_ERROR; nomem: return SCTP_IERROR_NOMEM; } sctp_disposition_t sctp_sf_eat_auth(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_authhdr *auth_hdr; struct sctp_chunk *chunk = arg; struct sctp_chunk *err_chunk; sctp_ierror_t error; /* Make sure that the peer has AUTH capable */ if (!asoc->peer.auth_capable) return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } /* Make sure that the AUTH chunk has valid length. 
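 *
 * [Editor's illustrative aside, not part of the original source:
 * sctp_sf_authenticate() above verifies the chunk by saving the received
 * digest, zeroing the digest field, recomputing the HMAC over the whole
 * chunk and comparing the two.  The stand-alone sketch below only shows
 * that ordering; hmac_compute() is a hypothetical stand-in and the
 * snippet is not meant to link or run.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the real keyed HMAC computation; only the call shape
 * matters for this illustration.
 */
static void hmac_compute(const uint8_t *msg, size_t len,
			 uint8_t *out, size_t outlen);

/* The verification pattern used by sctp_sf_authenticate() above:
 * 1. copy the digest out of the chunk, 2. zero the digest field in place,
 * 3. recompute the HMAC over the whole chunk, 4. compare old and new.
 */
static int verify_auth_digest(uint8_t *chunk, size_t chunk_len,
			      uint8_t *digest_field, size_t digest_len)
{
	uint8_t *saved = malloc(digest_len);
	int ok;

	if (!saved)
		return -1;
	memcpy(saved, digest_field, digest_len);
	memset(digest_field, 0, digest_len);
	hmac_compute(chunk, chunk_len, digest_field, digest_len);
	ok = memcmp(saved, digest_field, digest_len) == 0;
	free(saved);
	return ok ? 0 : -1;
}
#endif
/*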
*/ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); auth_hdr = (struct sctp_authhdr *)chunk->skb->data; error = sctp_sf_authenticate(net, ep, asoc, type, chunk); switch (error) { case SCTP_IERROR_AUTH_BAD_HMAC: /* Generate the ERROR chunk and discard the rest * of the packet */ err_chunk = sctp_make_op_error(asoc, chunk, SCTP_ERROR_UNSUP_HMAC, &auth_hdr->hmac_id, sizeof(__u16), 0); if (err_chunk) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err_chunk)); } /* Fall Through */ case SCTP_IERROR_AUTH_BAD_KEYID: case SCTP_IERROR_BAD_SIG: return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); case SCTP_IERROR_PROTO_VIOLATION: return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); case SCTP_IERROR_NOMEM: return SCTP_DISPOSITION_NOMEM; default: /* Prevent gcc warnings */ break; } if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { struct sctp_ulpevent *ev; ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id), SCTP_AUTH_NEWKEY, GFP_ATOMIC); if (!ev) return -ENOMEM; sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); } return SCTP_DISPOSITION_CONSUME; } /* * Process an unknown chunk. * * Section: 3.2. Also, 2.1 in the implementor's guide. * * Chunk Types are encoded such that the highest-order two bits specify * the action that must be taken if the processing endpoint does not * recognize the Chunk Type. * * 00 - Stop processing this SCTP packet and discard it, do not process * any further chunks within it. * * 01 - Stop processing this SCTP packet and discard it, do not process * any further chunks within it, and report the unrecognized * chunk in an 'Unrecognized Chunk Type'. * * 10 - Skip this chunk and continue processing. * * 11 - Skip this chunk and continue processing, but report in an ERROR * Chunk using the 'Unrecognized Chunk Type' cause of error. * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_unk_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *unk_chunk = arg; struct sctp_chunk *err_chunk; sctp_chunkhdr_t *hdr; pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk); if (!sctp_vtag_verify(unk_chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the chunk has a valid length. * Since we don't know the chunk type, we use a general * chunkhdr structure to make a comparison. */ if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); switch (type.chunk & SCTP_CID_ACTION_MASK) { case SCTP_CID_ACTION_DISCARD: /* Discard the packet. */ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); case SCTP_CID_ACTION_DISCARD_ERR: /* Generate an ERROR chunk as response. */ hdr = unk_chunk->chunk_hdr; err_chunk = sctp_make_op_error(asoc, unk_chunk, SCTP_ERROR_UNKNOWN_CHUNK, hdr, WORD_ROUND(ntohs(hdr->length)), 0); if (err_chunk) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err_chunk)); } /* Discard the packet. */ sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); return SCTP_DISPOSITION_CONSUME; case SCTP_CID_ACTION_SKIP: /* Skip the chunk. */ return SCTP_DISPOSITION_DISCARD; case SCTP_CID_ACTION_SKIP_ERR: /* Generate an ERROR chunk as response. 
*/ hdr = unk_chunk->chunk_hdr; err_chunk = sctp_make_op_error(asoc, unk_chunk, SCTP_ERROR_UNKNOWN_CHUNK, hdr, WORD_ROUND(ntohs(hdr->length)), 0); if (err_chunk) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err_chunk)); } /* Skip the chunk. */ return SCTP_DISPOSITION_CONSUME; default: break; } return SCTP_DISPOSITION_DISCARD; } /* * Discard the chunk. * * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2 * [Too numerous to mention...] * Verification Tag: No verification needed. * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_discard_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; /* Make sure that the chunk has a valid length. * Since we don't know the chunk type, we use a general * chunkhdr structure to make a comparison. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk); return SCTP_DISPOSITION_DISCARD; } /* * Discard the whole packet. * * Section: 8.4 2) * * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST * silently discard the OOTB packet and take no further action. * * Verification Tag: No verification necessary * * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_pdiscard(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS); sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); return SCTP_DISPOSITION_CONSUME; } /* * The other end is violating protocol. * * Section: Not specified * Verification Tag: Not specified * Inputs * (endpoint, asoc, chunk) * * Outputs * (asoc, reply_msg, msg_up, timers, counters) * * We simply tag the chunk as a violation. The state machine will log * the violation and continue. */ sctp_disposition_t sctp_sf_violation(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; /* Make sure that the chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); return SCTP_DISPOSITION_VIOLATION; } /* * Common function to handle a protocol violation. */ static sctp_disposition_t sctp_sf_abort_violation( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, sctp_cmd_seq_t *commands, const __u8 *payload, const size_t paylen) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; struct sctp_chunk *abort = NULL; /* SCTP-AUTH, Section 6.3: * It should be noted that if the receiver wants to tear * down an association in an authenticated way only, the * handling of malformed packets should not result in * tearing down the association. * * This means that if we only want to abort associations * in an authenticated way (i.e AUTH+ABORT), then we * can't destroy this association just because the packet * was malformed. 
*/ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) goto discard; /* Make the abort chunk. */ abort = sctp_make_abort_violation(asoc, chunk, payload, paylen); if (!abort) goto nomem; if (asoc) { /* Treat INIT-ACK as a special case during COOKIE-WAIT. */ if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK && !asoc->peer.i.init_tag) { sctp_initack_chunk_t *initack; initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T; else { unsigned int inittag; inittag = ntohl(initack->init_hdr.init_tag); sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG, SCTP_U32(inittag)); } } sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNREFUSED)); sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); } else { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); } } else { packet = sctp_ootb_pkt_new(net, asoc, chunk); if (!packet) goto nomem_pkt; if (sctp_test_T_bit(abort)) packet->vtag = ntohl(chunk->sctp_hdr->vtag); abort->skb->sk = ep->base.sk; sctp_packet_append_chunk(packet, abort); sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); } SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); discard: sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); return SCTP_DISPOSITION_ABORT; nomem_pkt: sctp_chunk_free(abort); nomem: return SCTP_DISPOSITION_NOMEM; } /* * Handle a protocol violation when the chunk length is invalid. * "Invalid" length is identified as smaller than the minimal length a * given chunk can be. For example, a SACK chunk has invalid length * if its length is set to be smaller than the size of sctp_sack_chunk_t. * * We inform the other end by sending an ABORT with a Protocol Violation * error code. * * Section: Not specified * Verification Tag: Nothing to do * Inputs * (endpoint, asoc, chunk) * * Outputs * (reply_msg, msg_up, counters) * * Generate an ABORT chunk and terminate the association. */ static sctp_disposition_t sctp_sf_violation_chunklen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { static const char err_str[] = "The following chunk had invalid length:"; return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sizeof(err_str)); } /* * Handle a protocol violation when the parameter length is invalid. * If the length is smaller than the minimum length of a given parameter, * or accumulated length in multi parameters exceeds the end of the chunk, * the length is considered as invalid. */ static sctp_disposition_t sctp_sf_violation_paramlen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, void *ext, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_paramhdr *param = ext; struct sctp_chunk *abort = NULL; if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) goto discard; /* Make the abort chunk. 
*/ abort = sctp_make_violation_paramlen(asoc, chunk, param); if (!abort) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); discard: sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); return SCTP_DISPOSITION_ABORT; nomem: return SCTP_DISPOSITION_NOMEM; } /* Handle a protocol violation when the peer trying to advance the * cumulative tsn ack to a point beyond the max tsn currently sent. * * We inform the other end by sending an ABORT with a Protocol Violation * error code. */ static sctp_disposition_t sctp_sf_violation_ctsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:"; return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sizeof(err_str)); } /* Handle protocol violation of an invalid chunk bundling. For example, * when we have an association and we receive bundled INIT-ACK, or * SHUDOWN-COMPLETE, our peer is clearly violationg the "MUST NOT bundle" * statement from the specs. Additionally, there might be an attacker * on the path and we may not want to continue this communication. */ static sctp_disposition_t sctp_sf_violation_chunk( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { static const char err_str[] = "The following chunk violates protocol:"; if (!asoc) return sctp_sf_violation(net, ep, asoc, type, arg, commands); return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sizeof(err_str)); } /*************************************************************************** * These are the state functions for handling primitive (Section 10) events. ***************************************************************************/ /* * sctp_sf_do_prm_asoc * * Section: 10.1 ULP-to-SCTP * B) Associate * * Format: ASSOCIATE(local SCTP instance name, destination transport addr, * outbound stream count) * -> association id [,destination transport addr list] [,outbound stream * count] * * This primitive allows the upper layer to initiate an association to a * specific peer endpoint. * * The peer endpoint shall be specified by one of the transport addresses * which defines the endpoint (see Section 1.4). If the local SCTP * instance has not been initialized, the ASSOCIATE is considered an * error. * [This is not relevant for the kernel implementation since we do all * initialization at boot time. It we hadn't initialized we wouldn't * get anywhere near this code.] * * An association id, which is a local handle to the SCTP association, * will be returned on successful establishment of the association. If * SCTP is not able to open an SCTP association with the peer endpoint, * an error is returned. * [In the kernel implementation, the struct sctp_association needs to * be created BEFORE causing this primitive to run.] * * Other association parameters may be returned, including the * complete destination transport addresses of the peer as well as the * outbound stream count of the local endpoint. 
One of the transport * address from the returned destination addresses will be selected by * the local endpoint as default primary path for sending SCTP packets * to this peer. The returned "destination transport addr list" can * be used by the ULP to change the default primary path or to force * sending a packet to a specific transport address. [All of this * stuff happens when the INIT ACK arrives. This is a NON-BLOCKING * function.] * * Mandatory attributes: * * o local SCTP instance name - obtained from the INITIALIZE operation. * [This is the argument asoc.] * o destination transport addr - specified as one of the transport * addresses of the peer endpoint with which the association is to be * established. * [This is asoc->peer.active_path.] * o outbound stream count - the number of outbound streams the ULP * would like to open towards this peer endpoint. * [BUG: This is not currently implemented.] * Optional attributes: * * None. * * The return value is a disposition. */ sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *repl; struct sctp_association *my_asoc; /* The comment below says that we enter COOKIE-WAIT AFTER * sending the INIT, but that doesn't actually work in our * implementation... */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_WAIT)); /* RFC 2960 5.1 Normal Establishment of an Association * * A) "A" first sends an INIT chunk to "Z". In the INIT, "A" * must provide its Verification Tag (Tag_A) in the Initiate * Tag field. Tag_A SHOULD be a random number in the range of * 1 to 4294967295 (see 5.3.1 for Tag value selection). ... */ repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0); if (!repl) goto nomem; /* Choose transport for INIT. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, SCTP_CHUNK(repl)); /* Cast away the const modifier, as we want to just * rerun it through as a sideffect. */ my_asoc = (struct sctp_association *)asoc; sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc)); /* After sending the INIT, "A" starts the T1-init timer and * enters the COOKIE-WAIT state. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Process the SEND primitive. * * Section: 10.1 ULP-to-SCTP * E) Send * * Format: SEND(association id, buffer address, byte count [,context] * [,stream id] [,life time] [,destination transport address] * [,unorder flag] [,no-bundle flag] [,payload protocol-id] ) * -> result * * This is the main method to send user data via SCTP. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * o buffer address - the location where the user message to be * transmitted is stored; * * o byte count - The size of the user data in number of bytes; * * Optional attributes: * * o context - an optional 32 bit integer that will be carried in the * sending failure notification to the ULP if the transportation of * this User Message fails. * * o stream id - to indicate which stream to send the data on. If not * specified, stream 0 will be used. * * o life time - specifies the life time of the user data. The user data * will not be sent by SCTP after the life time expires. 
This * parameter can be used to avoid efforts to transmit stale * user messages. SCTP notifies the ULP if the data cannot be * initiated to transport (i.e. sent to the destination via SCTP's * send primitive) within the life time variable. However, the * user data will be transmitted if SCTP has attempted to transmit a * chunk before the life time expired. * * o destination transport address - specified as one of the destination * transport addresses of the peer endpoint to which this packet * should be sent. Whenever possible, SCTP should use this destination * transport address for sending the packets, instead of the current * primary path. * * o unorder flag - this flag, if present, indicates that the user * would like the data delivered in an unordered fashion to the peer * (i.e., the U flag is set to 1 on all DATA chunks carrying this * message). * * o no-bundle flag - instructs SCTP not to bundle this user data with * other outbound DATA chunks. SCTP MAY still bundle even when * this flag is present, when faced with network congestion. * * o payload protocol-id - A 32 bit unsigned integer that is to be * passed to the peer indicating the type of payload protocol data * being transmitted. This value is passed as opaque data by SCTP. * * The return value is the disposition. */ sctp_disposition_t sctp_sf_do_prm_send(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_datamsg *msg = arg; sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg)); return SCTP_DISPOSITION_CONSUME; } /* * Process the SHUTDOWN primitive. * * Section: 10.1: * C) Shutdown * * Format: SHUTDOWN(association id) * -> result * * Gracefully closes an association. Any locally queued user data * will be delivered to the peer. The association will be terminated only * after the peer acknowledges all the SCTP packets sent. A success code * will be returned on successful termination of the association. If * attempting to terminate the association results in a failure, an error * code shall be returned. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * Optional attributes: * * None. * * The return value is the disposition. */ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { int disposition; /* From 9.2 Shutdown of an Association * Upon receipt of the SHUTDOWN primitive from its upper * layer, the endpoint enters SHUTDOWN-PENDING state and * remains there until all outstanding data has been * acknowledged by its peer. The endpoint accepts no new data * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING)); disposition = SCTP_DISPOSITION_CONSUME; if (sctp_outq_is_empty(&asoc->outqueue)) { disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, arg, commands); } return disposition; } /* * Process the ABORT primitive. * * Section: 10.1: * C) Abort * * Format: Abort(association id [, cause code]) * -> result * * Ungracefully closes an association. Any locally queued user data * will be discarded and an ABORT chunk is sent to the peer. A success code * will be returned on successful abortion of the association. 
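 *
 * [Editor's illustrative aside, not part of the original source: one
 * common user-space route to this ABORT primitive is lksctp-tools'
 * sctp_sendmsg() with the SCTP_ABORT flag on a one-to-many socket.  The
 * stand-alone sketch below is only an assumed usage example (build with
 * -lsctp); it is not part of this state-machine code.]
 */
#if 0	/* illustrative sketch only; never built as part of the kernel */
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Abort the association behind 'peer' on a one-to-many SCTP socket by
 * sending a zero-length message that carries the SCTP_ABORT flag.
 * Returns the sctp_sendmsg() result (-1 on error).
 */
static int abort_association(int sd, struct sockaddr_in *peer)
{
	return sctp_sendmsg(sd, NULL, 0,
			    (struct sockaddr *)peer, sizeof(*peer),
			    0 /* ppid */, SCTP_ABORT /* flags */,
			    0 /* stream */, 0 /* ttl */, 0 /* context */);
}
#endif
/*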
If * attempting to abort the association results in a failure, an error * code shall be returned. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * Optional attributes: * * o cause code - reason of the abort to be passed to the peer * * None. * * The return value is the disposition. */ sctp_disposition_t sctp_sf_do_9_1_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* From 9.1 Abort of an Association * Upon receipt of the ABORT primitive from its upper * layer, the endpoint enters CLOSED state and * discard all outstanding data has been * acknowledged by its peer. The endpoint accepts no new data * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ struct sctp_chunk *abort = arg; sctp_disposition_t retval; retval = SCTP_DISPOSITION_CONSUME; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. */ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); /* Delete the established association. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_USER_ABORT)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return retval; } /* We tried an illegal operation on an association which is closed. */ sctp_disposition_t sctp_sf_error_closed(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL)); return SCTP_DISPOSITION_CONSUME; } /* We tried an illegal operation on an association which is shutting * down. */ sctp_disposition_t sctp_sf_error_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-ESHUTDOWN)); return SCTP_DISPOSITION_CONSUME; } /* * sctp_cookie_wait_prm_shutdown * * Section: 4 Note: 2 * Verification Tag: * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues a shutdown while in COOKIE_WAIT state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; } /* * sctp_cookie_echoed_prm_shutdown * * Section: 4 Note: 2 * Verification Tag: * Inputs * (endpoint, asoc) * * The RFC does not explcitly address this issue, but is the route through the * state table when someone issues a shutdown while in COOKIE_ECHOED state. 
* * Outputs * (timers) */ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. */ return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands); } /* * sctp_sf_cookie_wait_prm_abort * * Section: 4 Note: 2 * Verification Tag: * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues an abort while in COOKIE_WAIT state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *abort = arg; sctp_disposition_t retval; /* Stop T1-init timer */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); retval = SCTP_DISPOSITION_CONSUME; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. */ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNREFUSED)); /* Delete the established association. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(SCTP_ERROR_USER_ABORT)); return retval; } /* * sctp_sf_cookie_echoed_prm_abort * * Section: 4 Note: 3 * Verification Tag: * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues an abort while in COOKIE_ECHOED state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. */ return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands); } /* * sctp_sf_shutdown_pending_prm_abort * * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues an abort while in SHUTDOWN-PENDING state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands); } /* * sctp_sf_shutdown_sent_prm_abort * * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues an abort while in SHUTDOWN-SENT state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Stop the T2-shutdown timer.
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands); } /* * sctp_sf_shutdown_ack_sent_prm_abort * * Inputs * (endpoint, asoc) * * The RFC does not explicitly address this issue, but is the route through the * state table when someone issues an abort while in SHUTDOWN-ACK-SENT state. * * Outputs * (timers) */ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* The same T2 timer is used, so we should be able to use the * common function with the SHUTDOWN-SENT state. */ return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands); } /* * Process the REQUESTHEARTBEAT primitive * * 10.1 ULP-to-SCTP * J) Request Heartbeat * * Format: REQUESTHEARTBEAT(association id, destination transport address) * * -> result * * Instructs the local endpoint to perform a HeartBeat on the specified * destination transport address of the given association. The returned * result should indicate whether the transmission of the HEARTBEAT * chunk to the destination address is successful. * * Mandatory attributes: * * o association id - local handle to the SCTP association * * o destination transport address - the transport address of the * association on which a heartbeat should be issued. */ sctp_disposition_t sctp_sf_do_prm_requestheartbeat( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg, commands)) return SCTP_DISPOSITION_NOMEM; /* * RFC 2960 (bis), section 8.3 * * D) Request an on-demand HEARTBEAT on a specific destination * transport address of a given association. * * The endpoint should increment the respective error counter of * the destination transport address each time a HEARTBEAT is sent * to that address and not acknowledged within one RTO. * */ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT, SCTP_TRANSPORT(arg)); return SCTP_DISPOSITION_CONSUME; } /* * ADDIP Section 4.1 ASCONF Chunk Procedures * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint, it should do A1 to A9. */ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); return SCTP_DISPOSITION_CONSUME; } /* * Ignore the primitive event * * The return value is the disposition of the primitive. */ sctp_disposition_t sctp_sf_ignore_primitive( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { pr_debug("%s: primitive type:%d is ignored\n", __func__, type.primitive); return SCTP_DISPOSITION_DISCARD; } /*************************************************************************** * These are the state functions for the OTHER events.
***************************************************************************/ /* * When the SCTP stack has no more user data to send or retransmit, this * notification is given to the user. Also, at the time when a user app * subscribes to this event, if there is no data to be sent or * retransmit, the stack will immediately send up this notification. */ sctp_disposition_t sctp_sf_do_no_pending_tsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_ulpevent *event; event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC); if (!event) return SCTP_DISPOSITION_NOMEM; sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); return SCTP_DISPOSITION_CONSUME; } /* * Start the shutdown negotiation. * * From Section 9.2: * Once all its outstanding data has been acknowledged, the endpoint * shall send a SHUTDOWN chunk to its peer including in the Cumulative * TSN Ack field the last sequential TSN it has received from the peer. * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT * state. If the timer expires, the endpoint must re-send the SHUTDOWN * with the updated last sequential TSN received from its peer. * * The return value is the disposition. */ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *reply; /* Once all its outstanding data has been acknowledged, the * endpoint shall send a SHUTDOWN chunk to its peer including * in the Cumulative TSN Ack field the last sequential TSN it * has received from the peer. */ reply = sctp_make_shutdown(asoc, NULL); if (!reply) goto nomem; /* Set the transport for the SHUTDOWN chunk and the timeout for the * T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); /* It shall then start the T2-shutdown timer */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); /* RFC 4960 Section 9.2 * The sender of the SHUTDOWN MAY also start an overall guard timer * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); /* and enter the SHUTDOWN-SENT state. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT)); /* sctp-implguide 2.10 Issues with Heartbeating and failover * * HEARTBEAT ... is discontinued after sending either SHUTDOWN * or SHUTDOWN-ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Generate a SHUTDOWN ACK now that everything is SACK'd. * * From Section 9.2: * * If it has no more outstanding DATA chunks, the SHUTDOWN receiver * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own, * entering the SHUTDOWN-ACK-SENT state. If the timer expires, the * endpoint must re-send the SHUTDOWN ACK. * * The return value is the disposition. 
*/ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = (struct sctp_chunk *) arg; struct sctp_chunk *reply; /* There are 2 ways of getting here: * 1) called in response to a SHUTDOWN chunk * 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued. * * For the case (2), the arg parameter is set to NULL. We need * to check that we have a chunk before accessing it's fields. */ if (chunk) { if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); } /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver * shall send a SHUTDOWN ACK ... */ reply = sctp_make_shutdown_ack(asoc, chunk); if (!reply) goto nomem; /* Set the transport for the SHUTDOWN ACK chunk and the timeout for * the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); /* and start/restart a T2-shutdown timer of its own, */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); /* Enter the SHUTDOWN-ACK-SENT state. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT)); /* sctp-implguide 2.10 Issues with Heartbeating and failover * * HEARTBEAT ... is discontinued after sending either SHUTDOWN * or SHUTDOWN-ACK. */ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * Ignore the event defined as other * * The return value is the disposition of the event. */ sctp_disposition_t sctp_sf_ignore_other(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { pr_debug("%s: the event other type:%d is ignored\n", __func__, type.other); return SCTP_DISPOSITION_DISCARD; } /************************************************************ * These are the state functions for handling timeout events. ************************************************************/ /* * RTX Timeout * * Section: 6.3.3 Handle T3-rtx Expiration * * Whenever the retransmission timer T3-rtx expires for a destination * address, do the following: * [See below] * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *transport = arg; SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); if (asoc->overall_error_count >= asoc->max_retrans) { if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { /* * We are here likely because the receiver had its rwnd * closed for a while and we have not been able to * transmit the locally queued data within the maximum * retransmission attempts limit. Start the T5 * shutdown guard timer to give the receiver one last * chance and some additional time to recover before * aborting. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE, SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); } else { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_DELETE_TCB; } } /* E1) For the destination address for which the timer * expires, adjust its ssthresh with rules defined in Section * 7.2.3 and set the cwnd <- MTU. */ /* E2) For the destination address for which the timer * expires, set RTO <- RTO * 2 ("back off the timer"). The * maximum value discussed in rule C7 above (RTO.max) may be * used to provide an upper bound to this doubling operation. */ /* E3) Determine how many of the earliest (i.e., lowest TSN) * outstanding DATA chunks for the address for which the * T3-rtx has expired will fit into a single packet, subject * to the MTU constraint for the path corresponding to the * destination transport address to which the retransmission * is being sent (this may be different from the address for * which the timer expires [see Section 6.4]). Call this * value K. Bundle and retransmit those K DATA chunks in a * single packet to the destination endpoint. * * Note: Any DATA chunks that were sent to the address for * which the T3-rtx timer expired but did not fit in one MTU * (rule E3 above), should be marked for retransmission and * sent as soon as cwnd allows (normally when a SACK arrives). */ /* Do some failure management (Section 8.2). */ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); /* NB: Rules E4 and F1 are implicit in R1. */ sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport)); return SCTP_DISPOSITION_CONSUME; } /* * Generate delayed SACK on timeout * * Section: 6.2 Acknowledgement on Reception of DATA Chunks * * The guidelines on delayed acknowledgement algorithm specified in * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an * acknowledgement SHOULD be generated for at least every second packet * (not every second DATA chunk) received, and SHOULD be generated * within 200 ms of the arrival of any unacknowledged DATA chunk. In * some situations it may be beneficial for an SCTP transmitter to be * more conservative than the algorithms detailed in this document * allow. However, an SCTP transmitter MUST NOT be more aggressive than * the following algorithms allow. */ sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); return SCTP_DISPOSITION_CONSUME; } /* * sctp_sf_t1_init_timer_expire * * Section: 4 Note: 2 * Verification Tag: * Inputs * (endpoint, asoc) * * RFC 2960 Section 4 Notes * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT * and re-start the T1-init timer without changing state. This MUST * be repeated up to 'Max.Init.Retransmits' times. After that, the * endpoint MUST abort the initialization process and report the * error to SCTP user. 
* * Outputs * (timers, events) * */ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *repl = NULL; struct sctp_bind_addr *bp; int attempts = asoc->init_err_counter + 1; pr_debug("%s: timer T1 expired (INIT)\n", __func__); SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS); if (attempts <= asoc->max_init_attempts) { bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); if (!repl) return SCTP_DISPOSITION_NOMEM; /* Choose transport for INIT. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, SCTP_CHUNK(repl)); /* Issue a sideeffect to do the needed accounting. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); } else { pr_debug("%s: giving up on INIT, attempts:%d " "max_init_attempts:%d\n", __func__, attempts, asoc->max_init_attempts); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); return SCTP_DISPOSITION_DELETE_TCB; } return SCTP_DISPOSITION_CONSUME; } /* * sctp_sf_t1_cookie_timer_expire * * Section: 4 Note: 2 * Verification Tag: * Inputs * (endpoint, asoc) * * RFC 2960 Section 4 Notes * 3) If the T1-cookie timer expires, the endpoint MUST retransmit * COOKIE ECHO and re-start the T1-cookie timer without changing * state. This MUST be repeated up to 'Max.Init.Retransmits' times. * After that, the endpoint MUST abort the initialization process and * report the error to SCTP user. * * Outputs * (timers, events) * */ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *repl = NULL; int attempts = asoc->init_err_counter + 1; pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__); SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS); if (attempts <= asoc->max_init_attempts) { repl = sctp_make_cookie_echo(asoc, NULL); if (!repl) return SCTP_DISPOSITION_NOMEM; sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, SCTP_CHUNK(repl)); /* Issue a sideeffect to do the needed accounting. */ sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); } else { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); return SCTP_DISPOSITION_DELETE_TCB; } return SCTP_DISPOSITION_CONSUME; } /* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN * with the updated last sequential TSN received from its peer. * * An endpoint should limit the number of retransmissions of the * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'. * If this threshold is exceeded the endpoint should destroy the TCB and * MUST report the peer endpoint unreachable to the upper layer (and * thus the association enters the CLOSED state). The reception of any * packet from its peer (i.e. as the peer sends all of its queued DATA * chunks) should clear the endpoint's retransmission count and restart * the T2-Shutdown timer, giving its peer ample opportunity to transmit * all of its queued DATA chunks that have not yet been sent. 
*/ sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *reply = NULL; pr_debug("%s: timer T2 expired\n", __func__); SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS); ((struct sctp_association *)asoc)->shutdown_retries++; if (asoc->overall_error_count >= asoc->max_retrans) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_DELETE_TCB; } switch (asoc->state) { case SCTP_STATE_SHUTDOWN_SENT: reply = sctp_make_shutdown(asoc, NULL); break; case SCTP_STATE_SHUTDOWN_ACK_SENT: reply = sctp_make_shutdown_ack(asoc, NULL); break; default: BUG(); break; } if (!reply) goto nomem; /* Do some failure management (Section 8.2). * If we remove the transport an SHUTDOWN was last sent to, don't * do failure management. */ if (asoc->shutdown_last_sent_to) sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for * the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); /* Restart the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); return SCTP_DISPOSITION_CONSUME; nomem: return SCTP_DISPOSITION_NOMEM; } /* * ADDIP Section 4.1 ASCONF CHunk Procedures * If the T4 RTO timer expires the endpoint should do B1 to B5 */ sctp_disposition_t sctp_sf_t4_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = asoc->addip_last_asconf; struct sctp_transport *transport = chunk->transport; SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS); /* ADDIP 4.1 B1) Increment the error counters and perform path failure * detection on the appropriate destination address as defined in * RFC2960 [5] section 8.1 and 8.2. */ if (transport) sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); /* Reconfig T4 timer and transport. */ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); /* ADDIP 4.1 B2) Increment the association error counters and perform * endpoint failure detection on the association as defined in * RFC2960 [5] section 8.1 and 8.2. * association error counter is incremented in SCTP_CMD_STRIKE. */ if (asoc->overall_error_count >= asoc->max_retrans) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_ABORT; } /* ADDIP 4.1 B3) Back-off the destination address RTO value to which * the ASCONF chunk was sent by doubling the RTO timer value. * This is done in SCTP_CMD_STRIKE. */ /* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible * choose an alternate destination address (please refer to RFC2960 * [5] section 6.4.1). 
An endpoint MUST NOT add new parameters to this * chunk, it MUST be the same (including its serial number) as the last * ASCONF sent. */ sctp_chunk_hold(asoc->addip_last_asconf); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asoc->addip_last_asconf)); /* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different * destination is selected, then the RTO used will be that of the new * destination address. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); return SCTP_DISPOSITION_CONSUME; } /* sctpimpguide-05 Section 2.12.2 * The sender of the SHUTDOWN MAY also start an overall guard timer * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. * At the expiration of this timer the sender SHOULD abort the association * by sending an ABORT chunk. */ sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *reply = NULL; pr_debug("%s: timer T5 expired\n", __func__); SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS); reply = sctp_make_abort(asoc, NULL, 0); if (!reply) goto nomem; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_ERROR)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_DISPOSITION_DELETE_TCB; nomem: return SCTP_DISPOSITION_NOMEM; } /* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires, * the association is automatically closed by starting the shutdown process. * The work that needs to be done is same as when SHUTDOWN is initiated by * the user. So this routine looks same as sctp_sf_do_9_2_prm_shutdown(). */ sctp_disposition_t sctp_sf_autoclose_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { int disposition; SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS); /* From 9.2 Shutdown of an Association * Upon receipt of the SHUTDOWN primitive from its upper * layer, the endpoint enters SHUTDOWN-PENDING state and * remains there until all outstanding data has been * acknowledged by its peer. The endpoint accepts no new data * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING)); disposition = SCTP_DISPOSITION_CONSUME; if (sctp_outq_is_empty(&asoc->outqueue)) { disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, arg, commands); } return disposition; } /***************************************************************************** * These are sa state functions which could apply to all types of events. ****************************************************************************/ /* * This table entry is not implemented. * * Inputs * (endpoint, asoc, chunk) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_not_impl(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { return SCTP_DISPOSITION_NOT_IMPL; } /* * This table entry represents a bug. * * Inputs * (endpoint, asoc, chunk) * * The return value is the disposition of the chunk. 
*/ sctp_disposition_t sctp_sf_bug(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { return SCTP_DISPOSITION_BUG; } /* * This table entry represents the firing of a timer in the wrong state. * Since timer deletion cannot be guaranteed a timer 'may' end up firing * when the association is in the wrong state. This event should * be ignored, so as to prevent any rearming of the timer. * * Inputs * (endpoint, asoc, chunk) * * The return value is the disposition of the chunk. */ sctp_disposition_t sctp_sf_timer_ignore(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { pr_debug("%s: timer %d ignored\n", __func__, type.chunk); return SCTP_DISPOSITION_CONSUME; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* Pull the SACK chunk based on the SACK header. */ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) { struct sctp_sackhdr *sack; unsigned int len; __u16 num_blocks; __u16 num_dup_tsns; /* Protect ourselves from reading too far into * the skb from a bogus sender. */ sack = (struct sctp_sackhdr *) chunk->skb->data; num_blocks = ntohs(sack->num_gap_ack_blocks); num_dup_tsns = ntohs(sack->num_dup_tsns); len = sizeof(struct sctp_sackhdr); len += (num_blocks + num_dup_tsns) * sizeof(__u32); if (len > chunk->skb->len) return NULL; skb_pull(chunk->skb, len); return sack; } /* Create an ABORT packet to be sent as a response, with the specified * error causes. */ static struct sctp_packet *sctp_abort_pkt_new(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, const void *payload, size_t paylen) { struct sctp_packet *packet; struct sctp_chunk *abort; packet = sctp_ootb_pkt_new(net, asoc, chunk); if (packet) { /* Make an ABORT. * The T bit will be set if the asoc is NULL. */ abort = sctp_make_abort(asoc, chunk, paylen); if (!abort) { sctp_ootb_pkt_free(packet); return NULL; } /* Reflect vtag if T-Bit is set */ if (sctp_test_T_bit(abort)) packet->vtag = ntohl(chunk->sctp_hdr->vtag); /* Add specified error causes, i.e., payload, to the * end of the chunk. */ sctp_addto_chunk(abort, paylen, payload); /* Set the skb to the belonging sock for accounting. */ abort->skb->sk = ep->base.sk; sctp_packet_append_chunk(packet, abort); } return packet; } /* Allocate a packet for responding in the OOTB conditions. */ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_packet *packet; struct sctp_transport *transport; __u16 sport; __u16 dport; __u32 vtag; /* Get the source and destination port from the inbound packet. */ sport = ntohs(chunk->sctp_hdr->dest); dport = ntohs(chunk->sctp_hdr->source); /* The V-tag is going to be the same as the inbound packet if no * association exists, otherwise, use the peer's vtag. */ if (asoc) { /* Special case the INIT-ACK as there is no peer's vtag * yet. */ switch (chunk->chunk_hdr->type) { case SCTP_CID_INIT_ACK: { sctp_initack_chunk_t *initack; initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; vtag = ntohl(initack->init_hdr.init_tag); break; } default: vtag = asoc->peer.i.init_tag; break; } } else { /* Special case the INIT and stale COOKIE_ECHO as there is no * vtag yet. 
*/ switch (chunk->chunk_hdr->type) { case SCTP_CID_INIT: { sctp_init_chunk_t *init; init = (sctp_init_chunk_t *)chunk->chunk_hdr; vtag = ntohl(init->init_hdr.init_tag); break; } default: vtag = ntohl(chunk->sctp_hdr->vtag); break; } } /* Make a transport for the bucket, Eliza... */ transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC); if (!transport) goto nomem; /* Cache a route for the transport with the chunk's destination as * the source address. */ sctp_transport_route(transport, (union sctp_addr *)&chunk->dest, sctp_sk(net->sctp.ctl_sock)); packet = sctp_packet_init(&transport->packet, transport, sport, dport); packet = sctp_packet_config(packet, vtag, 0); return packet; nomem: return NULL; } /* Free the packet allocated earlier for responding in the OOTB condition. */ void sctp_ootb_pkt_free(struct sctp_packet *packet) { sctp_transport_free(packet->transport); } /* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */ static void sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_chunk *err_chunk) { struct sctp_packet *packet; if (err_chunk) { packet = sctp_ootb_pkt_new(net, asoc, chunk); if (packet) { struct sctp_signed_cookie *cookie; /* Override the OOTB vtag from the cookie. */ cookie = chunk->subh.cookie_hdr; packet->vtag = cookie->c.peer_vtag; /* Set the skb to the belonging sock for accounting. */ err_chunk->skb->sk = ep->base.sk; sctp_packet_append_chunk(packet, err_chunk); sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); } else sctp_chunk_free (err_chunk); } } /* Process a data chunk */ static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands) { sctp_datahdr_t *data_hdr; struct sctp_chunk *err; size_t datalen; sctp_verb_t deliver; int tmp; __u32 tsn; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); u16 ssn; u16 sid; u8 ordered = 0; data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); tsn = ntohl(data_hdr->tsn); pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* ASSERT: Now skb->data is really the user data. */ /* Process ECN based congestion. * * Since the chunk structure is reused for all chunks within * a packet, we use ecn_ce_done to track if we've already * done CE processing for this packet. * * We need to do ECN processing even if we plan to discard the * chunk later. */ if (!chunk->ecn_ce_done) { struct sctp_af *af; chunk->ecn_ce_done = 1; af = sctp_get_af_specific( ipver2af(ip_hdr(chunk->skb)->version)); if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { /* Do real work as sideffect. */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, SCTP_U32(tsn)); } } tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); if (tmp < 0) { /* The TSN is too high--silently discard the chunk and * count on it getting retransmitted later. */ if (chunk->asoc) chunk->asoc->stats.outofseqtsns++; return SCTP_IERROR_HIGH_TSN; } else if (tmp > 0) { /* This is a duplicate. Record it. */ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); return SCTP_IERROR_DUP_TSN; } /* This is a new TSN. */ /* Discard if there is no room in the receive window. * Actually, allow a little bit of overflow (up to a MTU). 
*/ datalen = ntohs(chunk->chunk_hdr->length); datalen -= sizeof(sctp_data_chunk_t); deliver = SCTP_CMD_CHUNK_ULP; /* Think about partial delivery. */ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { /* Even if we don't accept this chunk there is * memory pressure. */ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL()); } /* Spill over rwnd a little bit. Note: While allowed, this spill over * seems a bit troublesome in that frag_point varies based on * PMTU. In cases, such as loopback, this might be a rather * large spill over. */ if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point))) { /* If this is the next TSN, consider reneging to make * room. Note: Playing nice with a confused sender. A * malicious sender can still eat up all our buffer * space and in the future we may want to detect and * do more drastic reneging. */ if (sctp_tsnmap_has_gap(map) && (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { pr_debug("%s: reneging for tsn:%u\n", __func__, tsn); deliver = SCTP_CMD_RENEGE; } else { pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n", __func__, tsn, datalen, asoc->rwnd); return SCTP_IERROR_IGNORE_TSN; } } /* * Also try to renege to limit our memory usage in the event that * we are under memory pressure * If we can't renege, don't worry about it, the sk_rmem_schedule * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our * memory usage too much */ if (*sk->sk_prot_creator->memory_pressure) { if (sctp_tsnmap_has_gap(map) && (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { pr_debug("%s: under pressure, reneging for tsn:%u\n", __func__, tsn); deliver = SCTP_CMD_RENEGE; } } /* * Section 3.3.10.9 No User Data (9) * * Cause of error * --------------- * No User Data: This error cause is returned to the originator of a * DATA chunk if a received DATA chunk has no user data. */ if (unlikely(0 == datalen)) { err = sctp_make_abort_no_data(asoc, chunk, tsn); if (err) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err)); } /* We are going to ABORT, so we might as well stop * processing the rest of the chunks in the packet. */ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(SCTP_ERROR_NO_DATA)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); return SCTP_IERROR_NO_DATA; } chunk->data_accepted = 1; /* Note: Some chunks may get overcounted (if we drop) or overcounted * if we renege and the chunk arrives again. */ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS); if (chunk->asoc) chunk->asoc->stats.iuodchunks++; } else { SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS); if (chunk->asoc) chunk->asoc->stats.iodchunks++; ordered = 1; } /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * If an endpoint receive a DATA chunk with an invalid stream * identifier, it shall acknowledge the reception of the DATA chunk * following the normal procedure, immediately send an ERROR chunk * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) * and discard the DATA chunk. 
*/ sid = ntohs(data_hdr->stream); if (sid >= asoc->c.sinit_max_instreams) { /* Mark tsn as received even though we drop it */ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, &data_hdr->stream, sizeof(data_hdr->stream), sizeof(u16)); if (err) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err)); return SCTP_IERROR_BAD_STREAM; } /* Check to see if the SSN is possible for this TSN. * The biggest gap we can record is 4K wide. Since SSNs wrap * at an unsigned short, there is no way that an SSN can * legitimately wrap for a valid TSN. We can simply check if the current * SSN is smaller than the next expected one. If it is, it wrapped * and is invalid. */ ssn = ntohs(data_hdr->ssn); if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) { return SCTP_IERROR_PROTO_VIOLATION; } /* Send the data up to the user. Note: Schedule the * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK * chunk needs the updated rwnd. */ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); return SCTP_IERROR_NO_ERROR; }
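/*
 * Illustrative sketch -- not part of the original kernel file above.  It is a
 * minimal, self-contained user-space demonstration of the 16-bit serial-number
 * comparison idea behind the SSN_lt() check in sctp_eat_data(): with sequence
 * numbers that wrap at an unsigned short, "a precedes b" is decided by where
 * the wrapped difference (a - b) falls in the 16-bit space.  The helper name
 * ssn_lt_demo() is hypothetical; the in-kernel macro may be implemented
 * differently.
 */
#include <stdio.h>
#include <stdint.h>

/* Serial-arithmetic "less than" for 16-bit stream sequence numbers. */
static int ssn_lt_demo(uint16_t a, uint16_t b)
{
	/* Distance from b to a, reduced modulo 2^16. */
	uint16_t diff = (uint16_t)(a - b);

	/* a precedes b when the wrapped distance lands in the upper half of
	 * the 16-bit space, i.e. the signed difference would be negative.
	 */
	return diff >= 0x8000;
}

int main(void)
{
	/* Plain case: 5 precedes 10. */
	printf("ssn_lt(5, 10)    = %d\n", ssn_lt_demo(5, 10));
	/* Wrapped case: 65530 precedes 3 once the counter has wrapped. */
	printf("ssn_lt(65530, 3) = %d\n", ssn_lt_demo(65530, 3));
	/* The reverse comparison is false. */
	printf("ssn_lt(3, 65530) = %d\n", ssn_lt_demo(3, 65530));
	return 0;
}

/* Why serial arithmetic works here: because the receiver's TSN map can only
 * record a gap of up to 4K TSNs, a DATA chunk accepted for a valid TSN can
 * never carry an SSN that is legitimately "behind" the next expected SSN,
 * so a negative wrapped difference indicates a protocol violation.
 */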
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2166_2
crossvul-cpp_data_bad_2060_0
/*****************************************************************************/ /* * yam.c -- YAM radio modem driver. * * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr) * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Please note that the GPL allows you to use the driver, NOT the radio. * In order to use the radio, you need a license from the communications * authority of your country. * * * History: * 0.0 F1OAT 06.06.98 Begin of work with baycom.c source code V 0.3 * 0.1 F1OAT 07.06.98 Add timer polling routine for channel arbitration * 0.2 F6FBB 08.06.98 Added delay after FPGA programming * 0.3 F6FBB 29.07.98 Delayed PTT implementation for dupmode=2 * 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistence * 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics * 0.6 F6FBB 25.08.98 Added 1200Bds format * 0.7 F6FBB 12.09.98 Added to the kernel configuration * 0.8 F6FBB 14.10.98 Fixed slottime/persistence timing bug * OK1ZIA 2.09.01 Fixed "kfree_skb on hard IRQ" * using dev_kfree_skb_any(). (important in 2.4 kernel) * */ /*****************************************************************************/ #include <linux/module.h> #include <linux/types.h> #include <linux/net.h> #include <linux/in.h> #include <linux/if.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/bitops.h> #include <linux/random.h> #include <asm/io.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <net/ax25.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include <asm/uaccess.h> #include <linux/init.h> #include <linux/yam.h> /* --------------------------------------------------------------------- */ static const char yam_drvname[] = "yam"; static const char yam_drvinfo[] __initconst = KERN_INFO \ "YAM driver version 0.8 by F1OAT/F6FBB\n"; /* --------------------------------------------------------------------- */ #define FIRMWARE_9600 "yam/9600.bin" #define FIRMWARE_1200 "yam/1200.bin" #define YAM_9600 1 #define YAM_1200 2 #define NR_PORTS 4 #define YAM_MAGIC 0xF10A7654 /* Transmitter states */ #define TX_OFF 0 #define TX_HEAD 1 #define TX_DATA 2 #define TX_CRC1 3 #define TX_CRC2 4 #define TX_TAIL 5 #define YAM_MAX_FRAME 1024 #define DEFAULT_BITRATE 9600 /* bps */ #define DEFAULT_HOLDD 10 /* sec */ #define DEFAULT_TXD 300 /* ms */ #define DEFAULT_TXTAIL 10 /* ms */ #define DEFAULT_SLOT 100 /* ms */ #define DEFAULT_PERS 64 /* 0->255 */ struct yam_port { int magic; int bitrate; int baudrate; int iobase; int irq; int dupmode; struct net_device *dev; int nb_rxint; int nb_mdint; /* Parameters section */ int txd; /* tx delay */ 
int holdd; /* duplex ptt delay */ int txtail; /* txtail delay */ int slot; /* slottime */ int pers; /* persistence */ /* Tx section */ int tx_state; int tx_count; int slotcnt; unsigned char tx_buf[YAM_MAX_FRAME]; int tx_len; int tx_crcl, tx_crch; struct sk_buff_head send_queue; /* Packets awaiting transmission */ /* Rx section */ int dcd; unsigned char rx_buf[YAM_MAX_FRAME]; int rx_len; int rx_crcl, rx_crch; }; struct yam_mcs { unsigned char bits[YAM_FPGA_SIZE]; int bitrate; struct yam_mcs *next; }; static struct net_device *yam_devs[NR_PORTS]; static struct yam_mcs *yam_data; static DEFINE_TIMER(yam_timer, NULL, 0, 0); /* --------------------------------------------------------------------- */ #define RBR(iobase) (iobase+0) #define THR(iobase) (iobase+0) #define IER(iobase) (iobase+1) #define IIR(iobase) (iobase+2) #define FCR(iobase) (iobase+2) #define LCR(iobase) (iobase+3) #define MCR(iobase) (iobase+4) #define LSR(iobase) (iobase+5) #define MSR(iobase) (iobase+6) #define SCR(iobase) (iobase+7) #define DLL(iobase) (iobase+0) #define DLM(iobase) (iobase+1) #define YAM_EXTENT 8 /* Interrupt Identification Register Bit Masks */ #define IIR_NOPEND 1 #define IIR_MSR 0 #define IIR_TX 2 #define IIR_RX 4 #define IIR_LSR 6 #define IIR_TIMEOUT 12 /* Fifo mode only */ #define IIR_MASK 0x0F /* Interrupt Enable Register Bit Masks */ #define IER_RX 1 /* enable rx interrupt */ #define IER_TX 2 /* enable tx interrupt */ #define IER_LSR 4 /* enable line status interrupts */ #define IER_MSR 8 /* enable modem status interrupts */ /* Modem Control Register Bit Masks */ #define MCR_DTR 0x01 /* DTR output */ #define MCR_RTS 0x02 /* RTS output */ #define MCR_OUT1 0x04 /* OUT1 output (not accessible in RS232) */ #define MCR_OUT2 0x08 /* Master Interrupt enable (must be set on PCs) */ #define MCR_LOOP 0x10 /* Loopback enable */ /* Modem Status Register Bit Masks */ #define MSR_DCTS 0x01 /* Delta CTS input */ #define MSR_DDSR 0x02 /* Delta DSR */ #define MSR_DRIN 0x04 /* Delta RI */ #define MSR_DDCD 0x08 /* Delta DCD */ #define MSR_CTS 0x10 /* CTS input */ #define MSR_DSR 0x20 /* DSR input */ #define MSR_RING 0x40 /* RI input */ #define MSR_DCD 0x80 /* DCD input */ /* line status register bit mask */ #define LSR_RXC 0x01 #define LSR_OE 0x02 #define LSR_PE 0x04 #define LSR_FE 0x08 #define LSR_BREAK 0x10 #define LSR_THRE 0x20 #define LSR_TSRE 0x40 /* Line Control Register Bit Masks */ #define LCR_DLAB 0x80 #define LCR_BREAK 0x40 #define LCR_PZERO 0x28 #define LCR_PEVEN 0x18 #define LCR_PODD 0x08 #define LCR_STOP1 0x00 #define LCR_STOP2 0x04 #define LCR_BIT5 0x00 #define LCR_BIT6 0x02 #define LCR_BIT7 0x01 #define LCR_BIT8 0x03 /* YAM Modem <-> UART Port mapping */ #define TX_RDY MSR_DCTS /* transmitter ready to send */ #define RX_DCD MSR_DCD /* carrier detect */ #define RX_FLAG MSR_RING /* hdlc flag received */ #define FPGA_DONE MSR_DSR /* FPGA is configured */ #define PTT_ON (MCR_RTS|MCR_OUT2) /* activate PTT */ #define PTT_OFF (MCR_DTR|MCR_OUT2) /* release PTT */ #define ENABLE_RXINT IER_RX /* enable uart rx interrupt during rx */ #define ENABLE_TXINT IER_MSR /* enable uart ms interrupt during tx */ #define ENABLE_RTXINT (IER_RX|IER_MSR) /* full duplex operations */ /************************************************************************* * CRC Tables ************************************************************************/ static const unsigned char chktabl[256] = {0x00, 0x89, 0x12, 0x9b, 0x24, 0xad, 0x36, 0xbf, 0x48, 0xc1, 0x5a, 0xd3, 0x6c, 0xe5, 0x7e, 0xf7, 0x81, 0x08, 0x93, 0x1a, 0xa5, 0x2c, 0xb7, 0x3e, 
0xc9, 0x40, 0xdb, 0x52, 0xed, 0x64, 0xff, 0x76, 0x02, 0x8b, 0x10, 0x99, 0x26, 0xaf, 0x34, 0xbd, 0x4a, 0xc3, 0x58, 0xd1, 0x6e, 0xe7, 0x7c, 0xf5, 0x83, 0x0a, 0x91, 0x18, 0xa7, 0x2e, 0xb5, 0x3c, 0xcb, 0x42, 0xd9, 0x50, 0xef, 0x66, 0xfd, 0x74, 0x04, 0x8d, 0x16, 0x9f, 0x20, 0xa9, 0x32, 0xbb, 0x4c, 0xc5, 0x5e, 0xd7, 0x68, 0xe1, 0x7a, 0xf3, 0x85, 0x0c, 0x97, 0x1e, 0xa1, 0x28, 0xb3, 0x3a, 0xcd, 0x44, 0xdf, 0x56, 0xe9, 0x60, 0xfb, 0x72, 0x06, 0x8f, 0x14, 0x9d, 0x22, 0xab, 0x30, 0xb9, 0x4e, 0xc7, 0x5c, 0xd5, 0x6a, 0xe3, 0x78, 0xf1, 0x87, 0x0e, 0x95, 0x1c, 0xa3, 0x2a, 0xb1, 0x38, 0xcf, 0x46, 0xdd, 0x54, 0xeb, 0x62, 0xf9, 0x70, 0x08, 0x81, 0x1a, 0x93, 0x2c, 0xa5, 0x3e, 0xb7, 0x40, 0xc9, 0x52, 0xdb, 0x64, 0xed, 0x76, 0xff, 0x89, 0x00, 0x9b, 0x12, 0xad, 0x24, 0xbf, 0x36, 0xc1, 0x48, 0xd3, 0x5a, 0xe5, 0x6c, 0xf7, 0x7e, 0x0a, 0x83, 0x18, 0x91, 0x2e, 0xa7, 0x3c, 0xb5, 0x42, 0xcb, 0x50, 0xd9, 0x66, 0xef, 0x74, 0xfd, 0x8b, 0x02, 0x99, 0x10, 0xaf, 0x26, 0xbd, 0x34, 0xc3, 0x4a, 0xd1, 0x58, 0xe7, 0x6e, 0xf5, 0x7c, 0x0c, 0x85, 0x1e, 0x97, 0x28, 0xa1, 0x3a, 0xb3, 0x44, 0xcd, 0x56, 0xdf, 0x60, 0xe9, 0x72, 0xfb, 0x8d, 0x04, 0x9f, 0x16, 0xa9, 0x20, 0xbb, 0x32, 0xc5, 0x4c, 0xd7, 0x5e, 0xe1, 0x68, 0xf3, 0x7a, 0x0e, 0x87, 0x1c, 0x95, 0x2a, 0xa3, 0x38, 0xb1, 0x46, 0xcf, 0x54, 0xdd, 0x62, 0xeb, 0x70, 0xf9, 0x8f, 0x06, 0x9d, 0x14, 0xab, 0x22, 0xb9, 0x30, 0xc7, 0x4e, 0xd5, 0x5c, 0xe3, 0x6a, 0xf1, 0x78}; static const unsigned char chktabh[256] = {0x00, 0x11, 0x23, 0x32, 0x46, 0x57, 0x65, 0x74, 0x8c, 0x9d, 0xaf, 0xbe, 0xca, 0xdb, 0xe9, 0xf8, 0x10, 0x01, 0x33, 0x22, 0x56, 0x47, 0x75, 0x64, 0x9c, 0x8d, 0xbf, 0xae, 0xda, 0xcb, 0xf9, 0xe8, 0x21, 0x30, 0x02, 0x13, 0x67, 0x76, 0x44, 0x55, 0xad, 0xbc, 0x8e, 0x9f, 0xeb, 0xfa, 0xc8, 0xd9, 0x31, 0x20, 0x12, 0x03, 0x77, 0x66, 0x54, 0x45, 0xbd, 0xac, 0x9e, 0x8f, 0xfb, 0xea, 0xd8, 0xc9, 0x42, 0x53, 0x61, 0x70, 0x04, 0x15, 0x27, 0x36, 0xce, 0xdf, 0xed, 0xfc, 0x88, 0x99, 0xab, 0xba, 0x52, 0x43, 0x71, 0x60, 0x14, 0x05, 0x37, 0x26, 0xde, 0xcf, 0xfd, 0xec, 0x98, 0x89, 0xbb, 0xaa, 0x63, 0x72, 0x40, 0x51, 0x25, 0x34, 0x06, 0x17, 0xef, 0xfe, 0xcc, 0xdd, 0xa9, 0xb8, 0x8a, 0x9b, 0x73, 0x62, 0x50, 0x41, 0x35, 0x24, 0x16, 0x07, 0xff, 0xee, 0xdc, 0xcd, 0xb9, 0xa8, 0x9a, 0x8b, 0x84, 0x95, 0xa7, 0xb6, 0xc2, 0xd3, 0xe1, 0xf0, 0x08, 0x19, 0x2b, 0x3a, 0x4e, 0x5f, 0x6d, 0x7c, 0x94, 0x85, 0xb7, 0xa6, 0xd2, 0xc3, 0xf1, 0xe0, 0x18, 0x09, 0x3b, 0x2a, 0x5e, 0x4f, 0x7d, 0x6c, 0xa5, 0xb4, 0x86, 0x97, 0xe3, 0xf2, 0xc0, 0xd1, 0x29, 0x38, 0x0a, 0x1b, 0x6f, 0x7e, 0x4c, 0x5d, 0xb5, 0xa4, 0x96, 0x87, 0xf3, 0xe2, 0xd0, 0xc1, 0x39, 0x28, 0x1a, 0x0b, 0x7f, 0x6e, 0x5c, 0x4d, 0xc6, 0xd7, 0xe5, 0xf4, 0x80, 0x91, 0xa3, 0xb2, 0x4a, 0x5b, 0x69, 0x78, 0x0c, 0x1d, 0x2f, 0x3e, 0xd6, 0xc7, 0xf5, 0xe4, 0x90, 0x81, 0xb3, 0xa2, 0x5a, 0x4b, 0x79, 0x68, 0x1c, 0x0d, 0x3f, 0x2e, 0xe7, 0xf6, 0xc4, 0xd5, 0xa1, 0xb0, 0x82, 0x93, 0x6b, 0x7a, 0x48, 0x59, 0x2d, 0x3c, 0x0e, 0x1f, 0xf7, 0xe6, 0xd4, 0xc5, 0xb1, 0xa0, 0x92, 0x83, 0x7b, 0x6a, 0x58, 0x49, 0x3d, 0x2c, 0x1e, 0x0f}; /************************************************************************* * FPGA functions ************************************************************************/ static void delay(int ms) { unsigned long timeout = jiffies + ((ms * HZ) / 1000); while (time_before(jiffies, timeout)) cpu_relax(); } /* * reset FPGA */ static void fpga_reset(int iobase) { outb(0, IER(iobase)); outb(LCR_DLAB | LCR_BIT5, LCR(iobase)); outb(1, DLL(iobase)); outb(0, DLM(iobase)); outb(LCR_BIT5, LCR(iobase)); inb(LSR(iobase)); inb(MSR(iobase)); /* turn off FPGA supply voltage */ outb(MCR_OUT1 | 
MCR_OUT2, MCR(iobase)); delay(100); /* turn on FPGA supply voltage again */ outb(MCR_DTR | MCR_RTS | MCR_OUT1 | MCR_OUT2, MCR(iobase)); delay(100); } /* * send one byte to FPGA */ static int fpga_write(int iobase, unsigned char wrd) { unsigned char bit; int k; unsigned long timeout = jiffies + HZ / 10; for (k = 0; k < 8; k++) { bit = (wrd & 0x80) ? (MCR_RTS | MCR_DTR) : MCR_DTR; outb(bit | MCR_OUT1 | MCR_OUT2, MCR(iobase)); wrd <<= 1; outb(0xfc, THR(iobase)); while ((inb(LSR(iobase)) & LSR_TSRE) == 0) if (time_after(jiffies, timeout)) return -1; } return 0; } /* * predef should be 0 for loading user defined mcs * predef should be YAM_1200 for loading predef 1200 mcs * predef should be YAM_9600 for loading predef 9600 mcs */ static unsigned char *add_mcs(unsigned char *bits, int bitrate, unsigned int predef) { const char *fw_name[2] = {FIRMWARE_9600, FIRMWARE_1200}; const struct firmware *fw; struct platform_device *pdev; struct yam_mcs *p; int err; switch (predef) { case 0: fw = NULL; break; case YAM_1200: case YAM_9600: predef--; pdev = platform_device_register_simple("yam", 0, NULL, 0); if (IS_ERR(pdev)) { printk(KERN_ERR "yam: Failed to register firmware\n"); return NULL; } err = request_firmware(&fw, fw_name[predef], &pdev->dev); platform_device_unregister(pdev); if (err) { printk(KERN_ERR "Failed to load firmware \"%s\"\n", fw_name[predef]); return NULL; } if (fw->size != YAM_FPGA_SIZE) { printk(KERN_ERR "Bogus length %zu in firmware \"%s\"\n", fw->size, fw_name[predef]); release_firmware(fw); return NULL; } bits = (unsigned char *)fw->data; break; default: printk(KERN_ERR "yam: Invalid predef number %u\n", predef); return NULL; } /* If it already exists, replace the bit data */ p = yam_data; while (p) { if (p->bitrate == bitrate) { memcpy(p->bits, bits, YAM_FPGA_SIZE); goto out; } p = p->next; } /* Allocate a new mcs */ if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) { release_firmware(fw); return NULL; } memcpy(p->bits, bits, YAM_FPGA_SIZE); p->bitrate = bitrate; p->next = yam_data; yam_data = p; out: release_firmware(fw); return p->bits; } static unsigned char *get_mcs(int bitrate) { struct yam_mcs *p; p = yam_data; while (p) { if (p->bitrate == bitrate) return p->bits; p = p->next; } /* Load predefined mcs data */ switch (bitrate) { case 1200: /* setting predef as YAM_1200 for loading predef 1200 mcs */ return add_mcs(NULL, bitrate, YAM_1200); default: /* setting predef as YAM_9600 for loading predef 9600 mcs */ return add_mcs(NULL, bitrate, YAM_9600); } } /* * download bitstream to FPGA * data is contained in bits[] array in yam1200.h resp. yam9600.h */ static int fpga_download(int iobase, int bitrate) { int i, rc; unsigned char *pbits; pbits = get_mcs(bitrate); if (pbits == NULL) return -1; fpga_reset(iobase); for (i = 0; i < YAM_FPGA_SIZE; i++) { if (fpga_write(iobase, pbits[i])) { printk(KERN_ERR "yam: error in write cycle\n"); return -1; /* write... */ } } fpga_write(iobase, 0xFF); rc = inb(MSR(iobase)); /* check DONE signal */ /* Needed for some hardwares */ delay(50); return (rc & MSR_DSR) ? 
0 : -1; } /************************************************************************ * Serial port init ************************************************************************/ static void yam_set_uart(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); int divisor = 115200 / yp->baudrate; outb(0, IER(dev->base_addr)); outb(LCR_DLAB | LCR_BIT8, LCR(dev->base_addr)); outb(divisor, DLL(dev->base_addr)); outb(0, DLM(dev->base_addr)); outb(LCR_BIT8, LCR(dev->base_addr)); outb(PTT_OFF, MCR(dev->base_addr)); outb(0x00, FCR(dev->base_addr)); /* Flush pending irq */ inb(RBR(dev->base_addr)); inb(MSR(dev->base_addr)); /* Enable rx irq */ outb(ENABLE_RTXINT, IER(dev->base_addr)); } /* --------------------------------------------------------------------- */ enum uart { c_uart_unknown, c_uart_8250, c_uart_16450, c_uart_16550, c_uart_16550A }; static const char *uart_str[] = {"unknown", "8250", "16450", "16550", "16550A"}; static enum uart yam_check_uart(unsigned int iobase) { unsigned char b1, b2, b3; enum uart u; enum uart uart_tab[] = {c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A}; b1 = inb(MCR(iobase)); outb(b1 | 0x10, MCR(iobase)); /* loopback mode */ b2 = inb(MSR(iobase)); outb(0x1a, MCR(iobase)); b3 = inb(MSR(iobase)) & 0xf0; outb(b1, MCR(iobase)); /* restore old values */ outb(b2, MSR(iobase)); if (b3 != 0x90) return c_uart_unknown; inb(RBR(iobase)); inb(RBR(iobase)); outb(0x01, FCR(iobase)); /* enable FIFOs */ u = uart_tab[(inb(IIR(iobase)) >> 6) & 3]; if (u == c_uart_16450) { outb(0x5a, SCR(iobase)); b1 = inb(SCR(iobase)); outb(0xa5, SCR(iobase)); b2 = inb(SCR(iobase)); if ((b1 != 0x5a) || (b2 != 0xa5)) u = c_uart_8250; } return u; } /****************************************************************************** * Rx Section ******************************************************************************/ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) { if (yp->dcd && yp->rx_len >= 3 && yp->rx_len < YAM_MAX_FRAME) { int pkt_len = yp->rx_len - 2 + 1; /* -CRC + kiss */ struct sk_buff *skb; if ((yp->rx_crch & yp->rx_crcl) != 0xFF) { /* Bad crc */ } else { if (!(skb = dev_alloc_skb(pkt_len))) { printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); ++dev->stats.rx_dropped; } else { unsigned char *cp; cp = skb_put(skb, pkt_len); *cp++ = 0; /* KISS kludge */ memcpy(cp, yp->rx_buf, pkt_len - 1); skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); ++dev->stats.rx_packets; } } } yp->rx_len = 0; yp->rx_crcl = 0x21; yp->rx_crch = 0xf3; } static inline void yam_rx_byte(struct net_device *dev, struct yam_port *yp, unsigned char rxb) { if (yp->rx_len < YAM_MAX_FRAME) { unsigned char c = yp->rx_crcl; yp->rx_crcl = (chktabl[c] ^ yp->rx_crch); yp->rx_crch = (chktabh[c] ^ rxb); yp->rx_buf[yp->rx_len++] = rxb; } } /******************************************************************************** * TX Section ********************************************************************************/ static void ptt_on(struct net_device *dev) { outb(PTT_ON, MCR(dev->base_addr)); } static void ptt_off(struct net_device *dev) { outb(PTT_OFF, MCR(dev->base_addr)); } static netdev_tx_t yam_send_packet(struct sk_buff *skb, struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); skb_queue_tail(&yp->send_queue, skb); dev->trans_start = jiffies; return NETDEV_TX_OK; } static void yam_start_tx(struct net_device *dev, struct yam_port *yp) { if ((yp->tx_state == TX_TAIL) || (yp->txd == 0)) yp->tx_count = 1; else yp->tx_count = (yp->bitrate * 
yp->txd) / 8000; yp->tx_state = TX_HEAD; ptt_on(dev); } static void yam_arbitrate(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); if (yp->magic != YAM_MAGIC || yp->tx_state != TX_OFF || skb_queue_empty(&yp->send_queue)) return; /* tx_state is TX_OFF and there is data to send */ if (yp->dupmode) { /* Full duplex mode, don't wait */ yam_start_tx(dev, yp); return; } if (yp->dcd) { /* DCD on, wait slotime ... */ yp->slotcnt = yp->slot / 10; return; } /* Is slottime passed ? */ if ((--yp->slotcnt) > 0) return; yp->slotcnt = yp->slot / 10; /* is random > persist ? */ if ((prandom_u32() % 256) > yp->pers) return; yam_start_tx(dev, yp); } static void yam_dotimer(unsigned long dummy) { int i; for (i = 0; i < NR_PORTS; i++) { struct net_device *dev = yam_devs[i]; if (dev && netif_running(dev)) yam_arbitrate(dev); } yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); } static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) { struct sk_buff *skb; unsigned char b, temp; switch (yp->tx_state) { case TX_OFF: break; case TX_HEAD: if (--yp->tx_count <= 0) { if (!(skb = skb_dequeue(&yp->send_queue))) { ptt_off(dev); yp->tx_state = TX_OFF; break; } yp->tx_state = TX_DATA; if (skb->data[0] != 0) { /* do_kiss_params(s, skb->data, skb->len); */ dev_kfree_skb_any(skb); break; } yp->tx_len = skb->len - 1; /* strip KISS byte */ if (yp->tx_len >= YAM_MAX_FRAME || yp->tx_len < 2) { dev_kfree_skb_any(skb); break; } skb_copy_from_linear_data_offset(skb, 1, yp->tx_buf, yp->tx_len); dev_kfree_skb_any(skb); yp->tx_count = 0; yp->tx_crcl = 0x21; yp->tx_crch = 0xf3; yp->tx_state = TX_DATA; } break; case TX_DATA: b = yp->tx_buf[yp->tx_count++]; outb(b, THR(dev->base_addr)); temp = yp->tx_crcl; yp->tx_crcl = chktabl[temp] ^ yp->tx_crch; yp->tx_crch = chktabh[temp] ^ b; if (yp->tx_count >= yp->tx_len) { yp->tx_state = TX_CRC1; } break; case TX_CRC1: yp->tx_crch = chktabl[yp->tx_crcl] ^ yp->tx_crch; yp->tx_crcl = chktabh[yp->tx_crcl] ^ chktabl[yp->tx_crch] ^ 0xff; outb(yp->tx_crcl, THR(dev->base_addr)); yp->tx_state = TX_CRC2; break; case TX_CRC2: outb(chktabh[yp->tx_crch] ^ 0xFF, THR(dev->base_addr)); if (skb_queue_empty(&yp->send_queue)) { yp->tx_count = (yp->bitrate * yp->txtail) / 8000; if (yp->dupmode == 2) yp->tx_count += (yp->bitrate * yp->holdd) / 8; if (yp->tx_count == 0) yp->tx_count = 1; yp->tx_state = TX_TAIL; } else { yp->tx_count = 1; yp->tx_state = TX_HEAD; } ++dev->stats.tx_packets; break; case TX_TAIL: if (--yp->tx_count <= 0) { yp->tx_state = TX_OFF; ptt_off(dev); } break; } } /*********************************************************************************** * ISR routine ************************************************************************************/ static irqreturn_t yam_interrupt(int irq, void *dev_id) { struct net_device *dev; struct yam_port *yp; unsigned char iir; int counter = 100; int i; int handled = 0; for (i = 0; i < NR_PORTS; i++) { dev = yam_devs[i]; yp = netdev_priv(dev); if (!netif_running(dev)) continue; while ((iir = IIR_MASK & inb(IIR(dev->base_addr))) != IIR_NOPEND) { unsigned char msr = inb(MSR(dev->base_addr)); unsigned char lsr = inb(LSR(dev->base_addr)); unsigned char rxb; handled = 1; if (lsr & LSR_OE) ++dev->stats.rx_fifo_errors; yp->dcd = (msr & RX_DCD) ? 
1 : 0; if (--counter <= 0) { printk(KERN_ERR "%s: too many irq iir=%d\n", dev->name, iir); goto out; } if (msr & TX_RDY) { ++yp->nb_mdint; yam_tx_byte(dev, yp); } if (lsr & LSR_RXC) { ++yp->nb_rxint; rxb = inb(RBR(dev->base_addr)); if (msr & RX_FLAG) yam_rx_flag(dev, yp); else yam_rx_byte(dev, yp, rxb); } } } out: return IRQ_RETVAL(handled); } #ifdef CONFIG_PROC_FS static void *yam_seq_start(struct seq_file *seq, loff_t *pos) { return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL; } static void *yam_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL; } static void yam_seq_stop(struct seq_file *seq, void *v) { } static int yam_seq_show(struct seq_file *seq, void *v) { struct net_device *dev = v; const struct yam_port *yp = netdev_priv(dev); seq_printf(seq, "Device %s\n", dev->name); seq_printf(seq, " Up %d\n", netif_running(dev)); seq_printf(seq, " Speed %u\n", yp->bitrate); seq_printf(seq, " IoBase 0x%x\n", yp->iobase); seq_printf(seq, " BaudRate %u\n", yp->baudrate); seq_printf(seq, " IRQ %u\n", yp->irq); seq_printf(seq, " TxState %u\n", yp->tx_state); seq_printf(seq, " Duplex %u\n", yp->dupmode); seq_printf(seq, " HoldDly %u\n", yp->holdd); seq_printf(seq, " TxDelay %u\n", yp->txd); seq_printf(seq, " TxTail %u\n", yp->txtail); seq_printf(seq, " SlotTime %u\n", yp->slot); seq_printf(seq, " Persist %u\n", yp->pers); seq_printf(seq, " TxFrames %lu\n", dev->stats.tx_packets); seq_printf(seq, " RxFrames %lu\n", dev->stats.rx_packets); seq_printf(seq, " TxInt %u\n", yp->nb_mdint); seq_printf(seq, " RxInt %u\n", yp->nb_rxint); seq_printf(seq, " RxOver %lu\n", dev->stats.rx_fifo_errors); seq_printf(seq, "\n"); return 0; } static const struct seq_operations yam_seqops = { .start = yam_seq_start, .next = yam_seq_next, .stop = yam_seq_stop, .show = yam_seq_show, }; static int yam_info_open(struct inode *inode, struct file *file) { return seq_open(file, &yam_seqops); } static const struct file_operations yam_info_fops = { .owner = THIS_MODULE, .open = yam_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* --------------------------------------------------------------------- */ static int yam_open(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); enum uart u; int i; int ret=0; printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq); if (!yp->bitrate) return -ENXIO; if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT || dev->irq < 2 || dev->irq > 15) { return -ENXIO; } if (!request_region(dev->base_addr, YAM_EXTENT, dev->name)) { printk(KERN_ERR "%s: cannot 0x%lx busy\n", dev->name, dev->base_addr); return -EACCES; } if ((u = yam_check_uart(dev->base_addr)) == c_uart_unknown) { printk(KERN_ERR "%s: cannot find uart type\n", dev->name); ret = -EIO; goto out_release_base; } if (fpga_download(dev->base_addr, yp->bitrate)) { printk(KERN_ERR "%s: cannot init FPGA\n", dev->name); ret = -EIO; goto out_release_base; } outb(0, IER(dev->base_addr)); if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq); ret = -EBUSY; goto out_release_base; } yam_set_uart(dev); netif_start_queue(dev); yp->slotcnt = yp->slot / 10; /* Reset overruns for all ports - FPGA programming makes overruns */ for (i = 0; i < NR_PORTS; i++) { struct net_device *yam_dev = yam_devs[i]; inb(LSR(yam_dev->base_addr)); yam_dev->stats.rx_fifo_errors = 0; } printk(KERN_INFO "%s at iobase 0x%lx irq %u uart %s\n", dev->name, 
dev->base_addr, dev->irq, uart_str[u]); return 0; out_release_base: release_region(dev->base_addr, YAM_EXTENT); return ret; } /* --------------------------------------------------------------------- */ static int yam_close(struct net_device *dev) { struct sk_buff *skb; struct yam_port *yp = netdev_priv(dev); if (!dev) return -EINVAL; /* * disable interrupts */ outb(0, IER(dev->base_addr)); outb(1, MCR(dev->base_addr)); /* Remove IRQ handler if last */ free_irq(dev->irq,dev); release_region(dev->base_addr, YAM_EXTENT); netif_stop_queue(dev); while ((skb = skb_dequeue(&yp->send_queue))) dev_kfree_skb(skb); printk(KERN_INFO "%s: close yam at iobase 0x%lx irq %u\n", yam_drvname, dev->base_addr, dev->irq); return 0; } /* --------------------------------------------------------------------- */ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct yam_port *yp = netdev_priv(dev); struct yamdrv_ioctl_cfg yi; struct yamdrv_ioctl_mcs *ym; int ioctl_cmd; if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int))) return -EFAULT; if (yp->magic != YAM_MAGIC) return -EINVAL; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (cmd != SIOCDEVPRIVATE) return -EINVAL; switch (ioctl_cmd) { case SIOCYAMRESERVED: return -EINVAL; /* unused */ case SIOCYAMSMCS: if (netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) return -ENOBUFS; if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { kfree(ym); return -EFAULT; } if (ym->bitrate > YAM_MAXBITRATE) { kfree(ym); return -EINVAL; } /* setting predef as 0 for loading userdefined mcs data */ add_mcs(ym->bits, ym->bitrate, 0); kfree(ym); break; case SIOCYAMSCFG: if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg))) return -EFAULT; if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if (yi.cfg.mask & YAM_IOBASE) { yp->iobase = yi.cfg.iobase; dev->base_addr = yi.cfg.iobase; } if (yi.cfg.mask & YAM_IRQ) { if (yi.cfg.irq > 15) return -EINVAL; yp->irq = yi.cfg.irq; dev->irq = yi.cfg.irq; } if (yi.cfg.mask & YAM_BITRATE) { if (yi.cfg.bitrate > YAM_MAXBITRATE) return -EINVAL; yp->bitrate = yi.cfg.bitrate; } if (yi.cfg.mask & YAM_BAUDRATE) { if (yi.cfg.baudrate > YAM_MAXBAUDRATE) return -EINVAL; yp->baudrate = yi.cfg.baudrate; } if (yi.cfg.mask & YAM_MODE) { if (yi.cfg.mode > YAM_MAXMODE) return -EINVAL; yp->dupmode = yi.cfg.mode; } if (yi.cfg.mask & YAM_HOLDDLY) { if (yi.cfg.holddly > YAM_MAXHOLDDLY) return -EINVAL; yp->holdd = yi.cfg.holddly; } if (yi.cfg.mask & YAM_TXDELAY) { if (yi.cfg.txdelay > YAM_MAXTXDELAY) return -EINVAL; yp->txd = yi.cfg.txdelay; } if (yi.cfg.mask & YAM_TXTAIL) { if (yi.cfg.txtail > YAM_MAXTXTAIL) return -EINVAL; yp->txtail = yi.cfg.txtail; } if (yi.cfg.mask & YAM_PERSIST) { if (yi.cfg.persist > YAM_MAXPERSIST) return -EINVAL; yp->pers = yi.cfg.persist; } if (yi.cfg.mask & YAM_SLOTTIME) { if (yi.cfg.slottime > YAM_MAXSLOTTIME) return -EINVAL; yp->slot = yi.cfg.slottime; yp->slotcnt = yp->slot / 10; } break; case SIOCYAMGCFG: yi.cfg.mask = 0xffffffff; 
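	/*
	 * yi lives on the kernel stack and is copied to user space below with
	 * copy_to_user().  Only its named fields are assigned here, so any
	 * padding or unused bytes would still hold stale stack data; zeroing
	 * the whole structure first (memset(&yi, 0, sizeof(yi)) before these
	 * assignments) is the usual fix for that kind of information leak.
	 */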
yi.cfg.iobase = yp->iobase; yi.cfg.irq = yp->irq; yi.cfg.bitrate = yp->bitrate; yi.cfg.baudrate = yp->baudrate; yi.cfg.mode = yp->dupmode; yi.cfg.txdelay = yp->txd; yi.cfg.holddly = yp->holdd; yi.cfg.txtail = yp->txtail; yi.cfg.persist = yp->pers; yi.cfg.slottime = yp->slot; if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg))) return -EFAULT; break; default: return -EINVAL; } return 0; } /* --------------------------------------------------------------------- */ static int yam_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = (struct sockaddr *) addr; /* addr is an AX.25 shifted ASCII mac address */ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); return 0; } /* --------------------------------------------------------------------- */ static const struct net_device_ops yam_netdev_ops = { .ndo_open = yam_open, .ndo_stop = yam_close, .ndo_start_xmit = yam_send_packet, .ndo_do_ioctl = yam_ioctl, .ndo_set_mac_address = yam_set_mac_address, }; static void yam_setup(struct net_device *dev) { struct yam_port *yp = netdev_priv(dev); yp->magic = YAM_MAGIC; yp->bitrate = DEFAULT_BITRATE; yp->baudrate = DEFAULT_BITRATE * 2; yp->iobase = 0; yp->irq = 0; yp->dupmode = 0; yp->holdd = DEFAULT_HOLDD; yp->txd = DEFAULT_TXD; yp->txtail = DEFAULT_TXTAIL; yp->slot = DEFAULT_SLOT; yp->pers = DEFAULT_PERS; yp->dev = dev; dev->base_addr = yp->iobase; dev->irq = yp->irq; skb_queue_head_init(&yp->send_queue); dev->netdev_ops = &yam_netdev_ops; dev->header_ops = &ax25_header_ops; dev->type = ARPHRD_AX25; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->mtu = AX25_MTU; dev->addr_len = AX25_ADDR_LEN; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); } static int __init yam_init_driver(void) { struct net_device *dev; int i, err; char name[IFNAMSIZ]; printk(yam_drvinfo); for (i = 0; i < NR_PORTS; i++) { sprintf(name, "yam%d", i); dev = alloc_netdev(sizeof(struct yam_port), name, yam_setup); if (!dev) { pr_err("yam: cannot allocate net device\n"); err = -ENOMEM; goto error; } err = register_netdev(dev); if (err) { printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); goto error; } yam_devs[i] = dev; } yam_timer.function = yam_dotimer; yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops); return 0; error: while (--i >= 0) { unregister_netdev(yam_devs[i]); free_netdev(yam_devs[i]); } return err; } /* --------------------------------------------------------------------- */ static void __exit yam_cleanup_driver(void) { struct yam_mcs *p; int i; del_timer(&yam_timer); for (i = 0; i < NR_PORTS; i++) { struct net_device *dev = yam_devs[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } while (yam_data) { p = yam_data; yam_data = yam_data->next; kfree(p); } remove_proc_entry("yam", init_net.proc_net); } /* --------------------------------------------------------------------- */ MODULE_AUTHOR("Frederic Rible F1OAT frible@teaser.fr"); MODULE_DESCRIPTION("Yam amateur radio modem driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_1200); MODULE_FIRMWARE(FIRMWARE_9600); module_init(yam_init_driver); module_exit(yam_cleanup_driver); /* --------------------------------------------------------------------- */
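/*
 * A minimal, self-contained sketch (not part of yam.c) of the pattern noted
 * in the SIOCYAMGCFG comment above: zero a struct before copying it out
 * wholesale, so compiler padding never carries stale stack bytes.  The
 * struct "demo_cfg" and the plain memcpy() are illustrative stand-ins; in
 * the driver the struct is yamdrv_ioctl_cfg and the copy is done with
 * copy_to_user().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cfg {        /* hypothetical layout with implicit padding */
	uint8_t  mode;       /* padding bytes typically follow this member */
	uint32_t iobase;
	uint32_t bitrate;
};

/* Fill the struct for the caller: clear everything first, then set fields. */
static void fill_cfg(struct demo_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));    /* no stale bytes survive in padding */
	cfg->mode = 1;
	cfg->iobase = 0x378;
	cfg->bitrate = 9600;
}

int main(void)
{
	struct demo_cfg cfg;
	unsigned char out[sizeof(cfg)];
	size_t i;

	fill_cfg(&cfg);
	memcpy(out, &cfg, sizeof(cfg));  /* stands in for copy_to_user() */
	for (i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}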
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if 
(complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char ) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; ssize_t offset; StringInfo *profile; 
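  /*
    Each Photoshop image resource block parsed below has the layout: a
    4-byte "8BIM" signature, a 16-bit resource id, a Pascal-style name
    padded to an even byte count, a 32-bit data length, and then the data
    itself, padded to an even length.
  */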
unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image 
*image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; if (type == 0) { if (packet_size == 1) SetPixelIndex(image,ScaleQuantumToChar(pixel),q); else SetPixelIndex(image,ScaleQuantumToShort(pixel),q); } color=image->colormap+(ssize_t) ConstrainColormapIndex(image, (ssize_t) GetPixelIndex(image,q),exception); if ((type == 0) && (channels > 1)) return; else color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) { status=MagickFalse; break; } status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } /* else if (packet_size == 4) { TODO: Figure out what to do there. 
} */ else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. */ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new 
layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); /* TODO: Remove this when we figure out how to support this */ if ((compression == ZipWithPrediction) && (image->depth == 32)) { (void) ThrowMagickException(exception,GetMagickModule(), TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)"); return(MagickFalse); } layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const ImageInfo *image_info, const size_t index) { if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } if (size == 0) return(MagickTrue); layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].page.x=(ssize_t) ReadBlobSignedLong(image); y=(ssize_t) ReadBlobSignedLong(image); x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } 
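  /*
    Remainder of the layer record: the 4-byte blend-mode key just read is
    byte-reversed on little-endian hosts, then one byte each of opacity,
    clipping, flags and a filler follow, and finally the length of the
    per-layer extra data (mask info, blending ranges, layer name and
    additional info blocks).
  */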
ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(image_info,i) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, 
exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ? 256 : 65536),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. 
*/ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(image,blocks,(size_t) length, &has_merged_image,exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((has_merged_image != MagickFalse) || (imageListLength == 1)) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. 
% % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. 
*/ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. */ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, 
AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 
12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo 
*image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
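/*
  Note on the length fields written above: WritePSDImage() and
  WritePSDLayersInternal() emit variable-length sections by writing a
  placeholder size first, remembering the blob position with TellBlob(), and
  back-patching the real value once the payload has been written.
  WritePSDOffset() and WritePSDSize() implement the patch: they save the
  current offset, SeekBlob() to the recorded position, rewrite the field
  (WritePSDOffset() a 2- or 4-byte field, WritePSDSize() a 4- or 8-byte
  field, depending on psd_info->version), and SeekBlob() back.  A minimal
  sketch of the pattern; WriteLayerPayload() is a hypothetical helper used
  only for illustration and is not part of this coder:

    MagickOffsetType size_offset = TellBlob(image);    (* position of the length field *)
    (void) SetPSDSize(&psd_info, image, 0);            (* placeholder length *)
    size_t size = WriteLayerPayload(image);            (* hypothetical payload writer *)
    (void) WritePSDSize(&psd_info, image, size, size_offset);  (* back-patch real length *)
*/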
/* * kvm_ia64.c: Basic KVM suppport On Itanium series processors * * * Copyright (C) 2007, Intel Corporation. * Xiantao Zhang (xiantao.zhang@intel.com) * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/bitops.h> #include <linux/hrtimer.h> #include <linux/uaccess.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/pci.h> #include <asm/pgtable.h> #include <asm/gcc_intrin.h> #include <asm/pal.h> #include <asm/cacheflush.h> #include <asm/div64.h> #include <asm/tlb.h> #include <asm/elf.h> #include <asm/sn/addrs.h> #include <asm/sn/clksupport.h> #include <asm/sn/shub_mmr.h> #include "misc.h" #include "vti.h" #include "iodev.h" #include "ioapic.h" #include "lapic.h" #include "irq.h" static unsigned long kvm_vmm_base; static unsigned long kvm_vsa_base; static unsigned long kvm_vm_buffer; static unsigned long kvm_vm_buffer_size; unsigned long kvm_vmm_gp; static long vp_env_info; static struct kvm_vmm_info *kvm_vmm_info; static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) { #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (vcpu->kvm->arch.is_sn2) return rtc_time(); else #endif return ia64_getreg(_IA64_REG_AR_ITC); } static void kvm_flush_icache(unsigned long start, unsigned long len) { int l; for (l = 0; l < (len + 32); l += 32) ia64_fc((void *)(start + l)); ia64_sync_i(); ia64_srlz_i(); } static void kvm_flush_tlb_all(void) { unsigned long i, j, count0, count1, stride0, stride1, addr; long flags; addr = local_cpu_data->ptce_base; count0 = local_cpu_data->ptce_count[0]; count1 = local_cpu_data->ptce_count[1]; stride0 = local_cpu_data->ptce_stride[0]; stride1 = local_cpu_data->ptce_stride[1]; local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); ia64_srlz_i(); /* srlz.i implies srlz.d */ } long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) { struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, (u64)opt_handler); return iprv.status; } static DEFINE_SPINLOCK(vp_lock); int kvm_arch_hardware_enable(void *garbage) { long status; long tmp_base; unsigned long pte; unsigned long saved_psr; int slot; pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return -EINVAL; spin_lock(&vp_lock); status = ia64_pal_vp_init_env(kvm_vsa_base ? 
VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } if (!kvm_vsa_base) { kvm_vsa_base = tmp_base; printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); } spin_unlock(&vp_lock); ia64_ptr_entry(0x3, slot); return 0; } void kvm_arch_hardware_disable(void *garbage) { long status; int slot; unsigned long pte; unsigned long saved_psr; unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return; status = ia64_pal_vp_exit_env(host_iva); if (status) printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", status); ia64_ptr_entry(0x3, slot); } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_MP_STATE: case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; default: r = 0; } return r; } static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 1; return 0; } static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct kvm_mmio_req *p; struct kvm_io_device *mmio_dev; int r; p = kvm_get_vcpu_ioreq(vcpu); if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) goto mmio; vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; vcpu->mmio_size = kvm_run->mmio.len = p->size; vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; if (vcpu->mmio_is_write) memcpy(vcpu->mmio_data, &p->data, p->size); memcpy(kvm_run->mmio.data, &p->data, p->size); kvm_run->exit_reason = KVM_EXIT_MMIO; return 0; mmio: if (p->dir) r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); else r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); if (r) printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); p->state = STATE_IORESP_READY; return 1; } static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_PAL_CALL) return kvm_pal_emul(vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 2; return 0; } } static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_SAL_CALL) { kvm_sal_emul(vcpu); return 1; } else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 3; return 0; } } static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (!test_and_set_bit(vector, &vpd->irr[0])) { vcpu->arch.irq_new_pending = 1; kvm_vcpu_kick(vcpu); return 1; } return 0; } /* * offset: address offset to IPI space. * value: deliver value. 
*/ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, uint64_t vector) { switch (dm) { case SAPIC_FIXED: break; case SAPIC_NMI: vector = 2; break; case SAPIC_EXTINT: vector = 0; break; case SAPIC_INIT: case SAPIC_PMI: default: printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); return; } __apic_accept_irq(vcpu, vector); } static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, unsigned long eid) { union ia64_lid lid; int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { lid.val = VCPU_LID(vcpu); if (lid.id == id && lid.eid == eid) return vcpu; } return NULL; } static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm_vcpu *target_vcpu; struct kvm_pt_regs *regs; union ia64_ipi_a addr = p->u.ipi_data.addr; union ia64_ipi_d data = p->u.ipi_data.data; target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); if (!target_vcpu) return handle_vm_error(vcpu, kvm_run); if (!target_vcpu->arch.launched) { regs = vcpu_regs(target_vcpu); regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (waitqueue_active(&target_vcpu->wq)) wake_up_interruptible(&target_vcpu->wq); } else { vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); if (target_vcpu != vcpu) kvm_vcpu_kick(target_vcpu); } return 1; } struct call_data { struct kvm_ptc_g ptc_g_data; struct kvm_vcpu *vcpu; }; static void vcpu_global_purge(void *info) { struct call_data *p = (struct call_data *)info; struct kvm_vcpu *vcpu = p->vcpu; if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) return; set_bit(KVM_REQ_PTC_G, &vcpu->requests); if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = p->ptc_g_data; } else { clear_bit(KVM_REQ_PTC_G, &vcpu->requests); vcpu->arch.ptc_g_count = 0; set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); } } static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm *kvm = vcpu->kvm; struct call_data call_data; int i; struct kvm_vcpu *vcpui; call_data.ptc_g_data = p->u.ptc_g_data; kvm_for_each_vcpu(i, vcpui, kvm) { if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || vcpu == vcpui) continue; if (waitqueue_active(&vcpui->wq)) wake_up_interruptible(&vcpui->wq); if (vcpui->cpu != -1) { call_data.vcpu = vcpui; smp_call_function_single(vcpui->cpu, vcpu_global_purge, &call_data, 1); } else printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); } return 1; } static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) { unsigned long pte, rtc_phys_addr, map_addr; int slot; map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); vcpu->arch.sn_rtc_tr_slot = slot; if (slot < 0) { printk(KERN_ERR "Mayday mayday! 
RTC mapping failed!\n"); slot = 0; } return slot; } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ktime_t kt; long itc_diff; unsigned long vcpu_now_itc; unsigned long expires; struct hrtimer *p_ht = &vcpu->arch.hlt_timer; unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (irqchip_in_kernel(vcpu->kvm)) { vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; if (time_after(vcpu_now_itc, vpd->itm)) { vcpu->arch.timer_check = 1; return 1; } itc_diff = vpd->itm - vcpu_now_itc; if (itc_diff < 0) itc_diff = -itc_diff; expires = div64_u64(itc_diff, cyc_per_usec); kt = ktime_set(0, 1000 * expires); vcpu->arch.ht_active = 1; hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); vcpu->arch.mp_state = KVM_MP_STATE_HALTED; kvm_vcpu_block(vcpu); hrtimer_cancel(p_ht); vcpu->arch.ht_active = 0; if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || kvm_cpu_has_pending_timer(vcpu)) if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) return -EINTR; return 1; } else { printk(KERN_ERR"kvm: Unsupported userspace halt!"); return 0; } } static int handle_vm_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int handle_vcpu_debug(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { printk("VMM: %s", vcpu->arch.log_buf); return 1; } static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = { [EXIT_REASON_VM_PANIC] = handle_vm_error, [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, [EXIT_REASON_PAL_CALL] = handle_pal_call, [EXIT_REASON_SAL_CALL] = handle_sal_call, [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_IPI] = handle_ipi, [EXIT_REASON_PTC_G] = handle_global_purge, [EXIT_REASON_DEBUG] = handle_vcpu_debug, }; static const int kvm_vti_max_exit_handlers = sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p_exit_data; p_exit_data = kvm_get_exit_data(vcpu); return p_exit_data->exit_reason; } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. 
*/ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { u32 exit_reason = kvm_get_exit_reason(vcpu); vcpu->arch.last_exit = exit_reason; if (exit_reason < kvm_vti_max_exit_handlers && kvm_vti_exit_handlers[exit_reason]) return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = exit_reason; } return 0; } static inline void vti_set_rr6(unsigned long rr6) { ia64_set_rr(RR6, rr6); ia64_srlz_i(); } static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) { unsigned long pte; struct kvm *kvm = vcpu->kvm; int r; /*Insert a pair of tr to map vmm*/ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); if (r < 0) goto out; vcpu->arch.vmm_tr_slot = r; /*Insert a pairt of tr to map data of vm*/ pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, pte, KVM_VM_DATA_SHIFT); if (r < 0) goto out; vcpu->arch.vm_tr_slot = r; #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) { r = kvm_sn2_setup_mappings(vcpu); if (r < 0) goto out; } #endif r = 0; out: return r; } static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); #endif } static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) { unsigned long psr; int r; int cpu = smp_processor_id(); if (vcpu->arch.last_run_cpu != cpu || per_cpu(last_vcpu, cpu) != vcpu) { per_cpu(last_vcpu, cpu) = vcpu; vcpu->arch.last_run_cpu = cpu; kvm_flush_tlb_all(); } vcpu->arch.host_rr6 = ia64_get_rr(RR6); vti_set_rr6(vcpu->arch.vmm_rr); local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); return r; } static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) { kvm_purge_vmm_mapping(vcpu); vti_set_rr6(vcpu->arch.host_rr6); } static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { union context *host_ctx, *guest_ctx; int r, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); again: if (signal_pending(current)) { r = -EINTR; kvm_run->exit_reason = KVM_EXIT_INTR; goto out; } preempt_disable(); local_irq_disable(); /*Get host and guest context with guest address space.*/ host_ctx = kvm_get_host_context(vcpu); guest_ctx = kvm_get_guest_context(vcpu); clear_bit(KVM_REQ_KICK, &vcpu->requests); r = kvm_vcpu_pre_transition(vcpu); if (r < 0) goto vcpu_run_fail; srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->mode = IN_GUEST_MODE; kvm_guest_enter(); /* * Transition to the guest */ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); kvm_vcpu_post_transition(vcpu); vcpu->arch.launched = 1; set_bit(KVM_REQ_KICK, &vcpu->requests); local_irq_enable(); /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); vcpu->mode = OUTSIDE_GUEST_MODE; preempt_enable(); idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_handle_exit(kvm_run, vcpu); if (r > 0) { if (!need_resched()) goto again; } out: srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { kvm_resched(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } return r; vcpu_run_fail: local_irq_enable(); preempt_enable(); kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; goto out; } static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) { struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); if (!vcpu->mmio_is_write) memcpy(&p->data, vcpu->mmio_data, 8); p->state = STATE_IORESP_READY; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); kvm_set_mmio_data(vcpu); vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; } r = __vcpu_run(vcpu, kvm_run); out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } struct kvm *kvm_arch_alloc_vm(void) { struct kvm *kvm; uint64_t vm_base; BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); if (!vm_base) return NULL; memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); kvm = (struct kvm *)(vm_base + offsetof(struct kvm_vm_data, kvm_vm_struct)); kvm->arch.vm_base = vm_base; printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); return kvm; } struct kvm_ia64_io_range { unsigned long start; unsigned long size; unsigned long type; }; static const struct kvm_ia64_io_range io_ranges[] = { {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, {PIB_START, PIB_SIZE, GPFN_PIB}, }; static void kvm_build_io_pmt(struct kvm *kvm) { unsigned long i, j; /* Mark I/O ranges */ for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); i++) { for (j = io_ranges[i].start; j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE) kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, io_ranges[i].type, 0); } } /*Use unused rids to virtualize guest rid.*/ #define GUEST_PHYSICAL_RR0 0x1739 #define GUEST_PHYSICAL_RR4 0x2739 #define VMM_INIT_RR 0x1660 int kvm_arch_init_vm(struct kvm *kvm) { BUG_ON(!kvm); kvm->arch.is_sn2 = ia64_platform_is("sn2"); kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; kvm->arch.vmm_init_rr = VMM_INIT_RR; /* *Fill P2M entries for MMIO/IO ranges */ kvm_build_io_pmt(kvm); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); return 0; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; 
break; } return r; } #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; for (i = 0; i < 16; i++) { vpd->vgr[i] = regs->vpd.vgr[i]; vpd->vbgr[i] = regs->vpd.vbgr[i]; } for (i = 0; i < 128; i++) vpd->vcr[i] = regs->vpd.vcr[i]; vpd->vhpi = regs->vpd.vhpi; vpd->vnat = regs->vpd.vnat; vpd->vbnat = regs->vpd.vbnat; vpd->vpsr = regs->vpd.vpsr; vpd->vpr = regs->vpd.vpr; memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context)); RESTORE_REGS(mp_state); RESTORE_REGS(vmm_rr); memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); RESTORE_REGS(itr_regions); RESTORE_REGS(dtr_regions); RESTORE_REGS(tc_regions); RESTORE_REGS(irq_check); RESTORE_REGS(itc_check); RESTORE_REGS(timer_check); RESTORE_REGS(timer_pending); RESTORE_REGS(last_itc); for (i = 0; i < 8; i++) { vcpu->arch.vrr[i] = regs->vrr[i]; vcpu->arch.ibr[i] = regs->ibr[i]; vcpu->arch.dbr[i] = regs->dbr[i]; } for (i = 0; i < 4; i++) vcpu->arch.insvc[i] = regs->insvc[i]; RESTORE_REGS(xtp); RESTORE_REGS(metaphysical_rr0); RESTORE_REGS(metaphysical_rr4); RESTORE_REGS(metaphysical_saved_rr0); RESTORE_REGS(metaphysical_saved_rr4); RESTORE_REGS(fp_psr); RESTORE_REGS(saved_gp); vcpu->arch.irq_new_pending = 1; vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); set_bit(KVM_REQ_RESUME, &vcpu->requests); return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; switch (ioctl) { case KVM_SET_MEMORY_REGION: { struct kvm_memory_region kvm_mem; struct kvm_userspace_memory_region kvm_userspace_mem; r = -EFAULT; if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) goto out; kvm_userspace_mem.slot = kvm_mem.slot; kvm_userspace_mem.flags = kvm_mem.flags; kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr; kvm_userspace_mem.memory_size = kvm_mem.memory_size; r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0); if (r) goto out; break; } case KVM_CREATE_IRQCHIP: r = -EFAULT; r = kvm_ioapic_init(kvm); if (r) goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_ioapic_destroy(kvm); mutex_unlock(&kvm->slots_lock); goto out; } break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_get_irqchip(kvm, &chip); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &chip, sizeof chip)) goto out; r = 0; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_set_irqchip(kvm, &chip); if (r) goto out; r = 0; break; } default: ; } out: 
return r; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return -EINVAL; } static int kvm_alloc_vmm_area(void) { if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { kvm_vmm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VMM_SIZE)); if (!kvm_vmm_base) return -ENOMEM; memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", kvm_vmm_base, kvm_vm_buffer); } return 0; } static void kvm_free_vmm_area(void) { if (kvm_vmm_base) { /*Zero this area before free to avoid bits leak!!*/ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); kvm_vmm_base = 0; kvm_vm_buffer = 0; kvm_vsa_base = 0; } } static int vti_init_vpd(struct kvm_vcpu *vcpu) { int i; union cpuid3_t cpuid3; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (IS_ERR(vpd)) return PTR_ERR(vpd); /* CPUID init */ for (i = 0; i < 5; i++) vpd->vcpuid[i] = ia64_get_cpuid(i); /* Limit the CPUID number to 5 */ cpuid3.value = vpd->vcpuid[3]; cpuid3.number = 4; /* 5 - 1 */ vpd->vcpuid[3] = cpuid3.value; /*Set vac and vdc fields*/ vpd->vac.a_from_int_cr = 1; vpd->vac.a_to_int_cr = 1; vpd->vac.a_from_psr = 1; vpd->vac.a_from_cpuid = 1; vpd->vac.a_cover = 1; vpd->vac.a_bsw = 1; vpd->vac.a_int = 1; vpd->vdc.d_vmsw = 1; /*Set virtual buffer*/ vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; return 0; } static int vti_create_vp(struct kvm_vcpu *vcpu) { long ret; struct vpd *vpd = vcpu->arch.vpd; unsigned long vmm_ivt; vmm_ivt = kvm_vmm_info->vmm_ivt; printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); if (ret) { printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); return -EINVAL; } return 0; } static void init_ptce_info(struct kvm_vcpu *vcpu) { ia64_ptce_info_t ptce = {0}; ia64_get_ptce(&ptce); vcpu->arch.ptce_base = ptce.base; vcpu->arch.ptce_count[0] = ptce.count[0]; vcpu->arch.ptce_count[1] = ptce.count[1]; vcpu->arch.ptce_stride[0] = ptce.stride[0]; vcpu->arch.ptce_stride[1] = ptce.stride[1]; } static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) { struct hrtimer *p_ht = &vcpu->arch.hlt_timer; if (hrtimer_cancel(p_ht)) hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); } static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) { struct kvm_vcpu *vcpu; wait_queue_head_t *q; vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); q = &vcpu->wq; if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) goto out; if (waitqueue_active(q)) wake_up_interruptible(q); out: vcpu->arch.timer_fired = 1; vcpu->arch.timer_check = 1; return HRTIMER_NORESTART; } #define PALE_RESET_ENTRY 0x80000000ffffffb0UL int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; int r; int i; long itc_offset; struct kvm *kvm = vcpu->kvm; struct kvm_pt_regs *regs = vcpu_regs(vcpu); union context *p_ctx = &vcpu->arch.guest; struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); /*Init vcpu context for first run.*/ if (IS_ERR(vmm_vcpu)) return PTR_ERR(vmm_vcpu); if (kvm_vcpu_is_bsp(vcpu)) { vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; /*Set entry address for first run.*/ regs->cr_iip = PALE_RESET_ENTRY; /*Initialize itc offset for vcpus*/ itc_offset = 0UL - kvm_get_itc(vcpu); for (i = 0; i < KVM_MAX_VCPUS; i++) 
{ v = (struct kvm_vcpu *)((char *)vcpu + sizeof(struct kvm_vcpu_data) * i); v->arch.itc_offset = itc_offset; v->arch.last_itc = 0; } } else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; r = -ENOMEM; vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); if (!vcpu->arch.apic) goto out; vcpu->arch.apic->vcpu = vcpu; p_ctx->gr[1] = 0; p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); p_ctx->gr[13] = (unsigned long)vmm_vcpu; p_ctx->psr = 0x1008522000UL; p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ p_ctx->caller_unat = 0; p_ctx->pr = 0x0; p_ctx->ar[36] = 0x0; /*unat*/ p_ctx->ar[19] = 0x0; /*rnat*/ p_ctx->ar[18] = (unsigned long)vmm_vcpu + ((sizeof(struct kvm_vcpu)+15) & ~15); p_ctx->ar[64] = 0x0; /*pfs*/ p_ctx->cr[0] = 0x7e04UL; p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; p_ctx->cr[8] = 0x3c; /*Initialize region register*/ p_ctx->rr[0] = 0x30; p_ctx->rr[1] = 0x30; p_ctx->rr[2] = 0x30; p_ctx->rr[3] = 0x30; p_ctx->rr[4] = 0x30; p_ctx->rr[5] = 0x30; p_ctx->rr[7] = 0x30; /*Initialize branch register 0*/ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vcpu->arch.hlt_timer.function = hlt_timer_fn; vcpu->arch.last_run_cpu = -1; vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); vcpu->arch.vsa_base = kvm_vsa_base; vcpu->arch.__gp = kvm_vmm_gp; vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); init_ptce_info(vcpu); r = 0; out: return r; } static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) { unsigned long psr; int r; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; r = kvm_vcpu_init(vcpu, vcpu->kvm, id); if (r) goto fail; r = vti_init_vpd(vcpu); if (r) { printk(KERN_DEBUG"kvm: vpd init error!!\n"); goto uninit; } r = vti_create_vp(vcpu); if (r) goto uninit; kvm_purge_vmm_mapping(vcpu); return 0; uninit: kvm_vcpu_uninit(vcpu); fail: return r; } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; unsigned long vm_base = kvm->arch.vm_base; int r; int cpu; BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); r = -EINVAL; if (id >= KVM_MAX_VCPUS) { printk(KERN_ERR"kvm: Can't configure vcpus > %ld", KVM_MAX_VCPUS); goto fail; } r = -ENOMEM; if (!vm_base) { printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); goto fail; } vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, vcpu_data[id].vcpu_struct)); vcpu->kvm = kvm; cpu = get_cpu(); r = vti_vcpu_setup(vcpu, id); put_cpu(); if (r) { printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); goto fail; } return vcpu; fail: return ERR_PTR(r); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -EINVAL; } void kvm_arch_free_vm(struct kvm *kvm) { unsigned long vm_base = kvm->arch.vm_base; if (vm_base) { memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); } } static void 
kvm_release_vm_pages(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int j; unsigned long base_gfn; slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { base_gfn = memslot->base_gfn; for (j = 0; j < memslot->npages; j++) { if (memslot->rmap[j]) put_page((struct page *)memslot->rmap[j]); } } } void kvm_arch_sync_events(struct kvm *kvm) { } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); #ifdef KVM_CAP_DEVICE_ASSIGNMENT kvm_free_all_assigned_devices(kvm); #endif kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { if (cpu != vcpu->cpu) { vcpu->cpu = cpu; if (vcpu->arch.ht_active) kvm_migrate_hlt_timer(vcpu); } } #define SAVE_REGS(_x) regs->_x = vcpu->arch._x int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; vcpu_load(vcpu); for (i = 0; i < 16; i++) { regs->vpd.vgr[i] = vpd->vgr[i]; regs->vpd.vbgr[i] = vpd->vbgr[i]; } for (i = 0; i < 128; i++) regs->vpd.vcr[i] = vpd->vcr[i]; regs->vpd.vhpi = vpd->vhpi; regs->vpd.vnat = vpd->vnat; regs->vpd.vbnat = vpd->vbnat; regs->vpd.vpsr = vpd->vpsr; regs->vpd.vpr = vpd->vpr; memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context)); SAVE_REGS(mp_state); SAVE_REGS(vmm_rr); memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); SAVE_REGS(itr_regions); SAVE_REGS(dtr_regions); SAVE_REGS(tc_regions); SAVE_REGS(irq_check); SAVE_REGS(itc_check); SAVE_REGS(timer_check); SAVE_REGS(timer_pending); SAVE_REGS(last_itc); for (i = 0; i < 8; i++) { regs->vrr[i] = vcpu->arch.vrr[i]; regs->ibr[i] = vcpu->arch.ibr[i]; regs->dbr[i] = vcpu->arch.dbr[i]; } for (i = 0; i < 4; i++) regs->insvc[i] = vcpu->arch.insvc[i]; regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); SAVE_REGS(xtp); SAVE_REGS(metaphysical_rr0); SAVE_REGS(metaphysical_rr4); SAVE_REGS(metaphysical_saved_rr0); SAVE_REGS(metaphysical_saved_rr4); SAVE_REGS(fp_psr); SAVE_REGS(saved_gp); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); return 0; } int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; return 0; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.hlt_timer); kfree(vcpu->arch.apic); } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; struct kvm_ia64_vcpu_stack *stack = NULL; long r; switch (ioctl) { case KVM_IA64_VCPU_GET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_WRITE, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " "Illegal user destination address for stack\n"); goto out; } stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); if (r) goto 
out; if (copy_to_user(user_stack, stack, sizeof(struct kvm_ia64_vcpu_stack))) { r = -EFAULT; goto out; } break; } case KVM_IA64_VCPU_SET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_READ, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " "Illegal user address for stack\n"); goto out; } stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } if (copy_from_user(stack, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) goto out; r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); break; } default: r = -EINVAL; } out: kfree(stack); return r; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { unsigned long i; unsigned long pfn; int npages = memslot->npages; unsigned long base_gfn = memslot->base_gfn; if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) return -ENOMEM; for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); if (!kvm_is_mmio_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, pfn << PAGE_SHIFT, _PAGE_AR_RWX | _PAGE_MA_WB); memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); } else { kvm_set_pmt_entry(kvm, base_gfn + i, GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), _PAGE_MA_UC); memslot->rmap[i] = 0; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { return; } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_vcpu_uninit(vcpu); } static int vti_cpu_has_kvm_support(void) { long avail = 1, status = 1, control = 1; long ret; ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); if (ret) goto out; if (!(avail & PAL_PROC_VM_BIT)) goto out; printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); if (ret) goto out; printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); if (!(vp_env_info & VP_OPCODE)) { printk(KERN_WARNING"kvm: No opcode ability on hardware, " "vm_env_info:0x%lx\n", vp_env_info); } return 1; out: return 0; } /* * On SN2, the ITC isn't stable, so copy in fast path code to use the * SN2 RTC, replacing the ITC based default verion. */ static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long new_ar, new_ar_sn2; unsigned long module_base; if (!ia64_platform_is("sn2")) return; module_base = (unsigned long)module->module_core; new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " "as source\n"); /* * Copy the SN2 version of mov_ar into place. They are both * the same size, so 6 bundles is sufficient (6 * 0x10). 
*/ memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); } static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long module_base; unsigned long vmm_size; unsigned long vmm_offset, func_offset, fdesc_offset; struct fdesc *p_fdesc; BUG_ON(!module); if (!kvm_vmm_base) { printk("kvm: kvm area hasn't been initialized yet!!\n"); return -EFAULT; } /*Calculate new position of relocated vmm module.*/ module_base = (unsigned long)module->module_core; vmm_size = module->core_size; if (unlikely(vmm_size > KVM_VMM_SIZE)) return -EFAULT; memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); kvm_patch_vmm(vmm_info, module); kvm_flush_icache(kvm_vmm_base, vmm_size); /*Recalculate kvm_vmm_info based on new VMM*/ vmm_offset = vmm_info->vmm_ivt - module_base; kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", kvm_vmm_info->vmm_ivt); fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", KVM_VMM_BASE+func_offset); fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); kvm_vmm_gp = p_fdesc->gp; printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", kvm_vmm_info->vmm_entry); printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", KVM_VMM_BASE + func_offset); return 0; } int kvm_arch_init(void *opaque) { int r; struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; if (!vti_cpu_has_kvm_support()) { printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); r = -EOPNOTSUPP; goto out; } if (kvm_vmm_info) { printk(KERN_ERR "kvm: Already loaded VMM module!\n"); r = -EEXIST; goto out; } r = -ENOMEM; kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); if (!kvm_vmm_info) goto out; if (kvm_alloc_vmm_area()) goto out_free0; r = kvm_relocate_vmm(vmm_info, vmm_info->module); if (r) goto out_free1; return 0; out_free1: kvm_free_vmm_area(); out_free0: kfree(kvm_vmm_info); out: return r; } void kvm_arch_exit(void) { kvm_free_vmm_area(); kfree(kvm_vmm_info); kvm_vmm_info = NULL; } static void kvm_ia64_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { int i; long base; unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; spin_lock(&kvm->arch.dirty_log_lock); for (i = 0; i < n/sizeof(long); ++i) { memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; dirty_bitmap[base + i] = 0; } spin_unlock(&kvm->arch.dirty_log_lock); } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) 
goto out; kvm_ia64_sync_dirty_log(kvm, memslot); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { kvm_flush_remote_tlbs(kvm); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_hardware_unsetup(void) { } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); me = get_cpu(); if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu)) if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) smp_send_reschedule(cpu); put_cpu(); } int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) { return __apic_accept_irq(vcpu, irq->vector); } int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return apic->vcpu->vcpu_id == dest; } int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) { return 0; } int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) { return vcpu1->arch.xtp - vcpu2->arch.xtp; } int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, int dest, int dest_mode) { struct kvm_lapic *target = vcpu->arch.apic; return (dest_mode == 0) ? kvm_apic_match_physical_addr(target, dest) : kvm_apic_match_logical_addr(target, dest); } static int find_highest_bits(int *dat) { u32 bits, bitnum; int i; /* loop for all 256 bits */ for (i = 7; i >= 0 ; i--) { bits = dat[i]; if (bits) { bitnum = fls(bits); return i * 32 + bitnum - 1; } } return -1; } int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (vpd->irr[0] & (1UL << NMI_VECTOR)) return NMI_VECTOR; if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) return ExtINT_VECTOR; return find_highest_bits((int *)&vpd->irr[0]); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return vcpu->arch.timer_fired; } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || (kvm_highest_pending_irq(vcpu) != -1); } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } static int vcpu_reset(struct kvm_vcpu *vcpu) { int r; long psr; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; vcpu->arch.launched = 0; kvm_arch_vcpu_uninit(vcpu); r = kvm_arch_vcpu_init(vcpu); if (r) goto fail; kvm_purge_vmm_mapping(vcpu); r = 0; fail: return r; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { int r = 0; vcpu->arch.mp_state = mp_state->mp_state; if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) r = vcpu_reset(vcpu); return r; }
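/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * kvm_handle_exit() above dispatches on the hardware exit reason through a
 * bounded function-pointer table, kvm_vti_exit_handlers[], doing a range
 * check and a NULL check before calling the handler and falling back to
 * KVM_EXIT_UNKNOWN otherwise.  The self-contained userspace example below
 * shows the same bounded table-dispatch idiom with hypothetical names
 * (demo_handle_exit, handle_a, ...); it is guarded by #if 0 so it is never
 * compiled into this file and is meant only as a reference for the pattern,
 * not as a mirror of the real KVM interfaces.
 */
#if 0
#include <stdio.h>

enum { DEMO_EXIT_A, DEMO_EXIT_B, DEMO_EXIT_NR };

static int handle_a(int data) { printf("handler A: %d\n", data); return 1; }
static int handle_b(int data) { printf("handler B: %d\n", data); return 0; }

/* Table indexed by exit reason; entries left out stay NULL. */
static int (*demo_handlers[])(int) = {
	[DEMO_EXIT_A] = handle_a,
	[DEMO_EXIT_B] = handle_b,
};
static const unsigned int demo_max_handlers =
	sizeof(demo_handlers) / sizeof(*demo_handlers);

static int demo_handle_exit(unsigned int reason, int data)
{
	/* Bounds check first, then the NULL check, as in kvm_handle_exit(). */
	if (reason < demo_max_handlers && demo_handlers[reason])
		return demo_handlers[reason](data);

	printf("unhandled exit reason %u\n", reason);
	return 0;
}

int main(void)
{
	demo_handle_exit(DEMO_EXIT_A, 42);	/* dispatched to handle_a */
	demo_handle_exit(DEMO_EXIT_NR, 0);	/* out of range, falls back */
	return 0;
}
#endif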
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3622_0
crossvul-cpp_data_good_1832_0
/* * The two pass scaling function is based on: * Filtered Image Rescaling * Based on Gems III * - Schumacher general filtered image rescaling * (pp. 414-424) * by Dale Schumacher * * Additional changes by Ray Gardener, Daylon Graphics Ltd. * December 4, 1999 * * Ported to libgd by Pierre Joye. Support for multiple channels * added (argb for now). * * Initial sources code is avaibable in the Gems Source Code Packages: * http://www.acm.org/pubs/tog/GraphicsGems/GGemsIII.tar.gz * */ /* Summary: - Horizontal filter contributions are calculated on the fly, as each column is mapped from src to dst image. This lets us omit having to allocate a temporary full horizontal stretch of the src image. - If none of the src pixels within a sampling region differ, then the output pixel is forced to equal (any of) the source pixel. This ensures that filters do not corrupt areas of constant color. - Filter weight contribution results, after summing, are rounded to the nearest pixel color value instead of being casted to ILubyte (usually an int or char). Otherwise, artifacting occurs. */ /* Additional functions are available for simple rotation or up/downscaling. downscaling using the fixed point implementations are usually much faster than the existing gdImageCopyResampled while having a similar or better quality. For image rotations, the optimized versions have a lazy antialiasing for the edges of the images. For a much better antialiased result, the affine function is recommended. */ /* TODO: - Optimize pixel accesses and loops once we have continuous buffer - Add scale support for a portion only of an image (equivalent of copyresized/resampled) */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #define NDEBUG 1 /* TODO: disable/enable assertions in configure. */ #include <assert.h> #include "gd.h" #include "gdhelpers.h" #include "gd_intern.h" #ifdef _MSC_VER # pragma optimize("t", on) # include <emmintrin.h> #endif static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height); static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height); static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height); static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor); static gdImagePtr gdImageRotateBilinear(gdImagePtr src, const float degrees, const int bgColor); static gdImagePtr gdImageRotateBicubicFixed(gdImagePtr src, const float degrees, const int bgColor); static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor); #define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? 
(low) : (x))) /* only used here, let do a generic fixed point integers later if required by other part of GD */ typedef long gdFixed; /* Integer to fixed point */ #define gd_itofx(x) ((x) << 8) /* Float to fixed point */ #define gd_ftofx(x) (long)((x) * 256) /* Double to fixed point */ #define gd_dtofx(x) (long)((x) * 256) /* Fixed point to integer */ #define gd_fxtoi(x) ((x) >> 8) /* Fixed point to float */ # define gd_fxtof(x) ((float)(x) / 256) /* Fixed point to double */ #define gd_fxtod(x) ((double)(x) / 256) /* Multiply a fixed by a fixed */ #define gd_mulfx(x,y) (((x) * (y)) >> 8) /* Divide a fixed by a fixed */ #define gd_divfx(x,y) (((x) << 8) / (y)) typedef struct { double *Weights; /* Normalized weights of neighboring pixels */ int Left,Right; /* Bounds of source pixels window */ } ContributionType; /* Contirbution information for a single pixel */ typedef struct { ContributionType *ContribRow; /* Row (or column) of contribution weights */ unsigned int WindowSize, /* Filter window size (of affecting source pixels) */ LineLength; /* Length of line (no. or rows / cols) */ } LineContribType; /* Each core filter has its own radius */ #define DEFAULT_FILTER_BICUBIC 3.0 #define DEFAULT_FILTER_BOX 0.5 #define DEFAULT_FILTER_GENERALIZED_CUBIC 0.5 #define DEFAULT_FILTER_RADIUS 1.0 #define DEFAULT_LANCZOS8_RADIUS 8.0 #define DEFAULT_LANCZOS3_RADIUS 3.0 #define DEFAULT_HERMITE_RADIUS 1.0 #define DEFAULT_BOX_RADIUS 0.5 #define DEFAULT_TRIANGLE_RADIUS 1.0 #define DEFAULT_BELL_RADIUS 1.5 #define DEFAULT_CUBICSPLINE_RADIUS 2.0 #define DEFAULT_MITCHELL_RADIUS 2.0 #define DEFAULT_COSINE_RADIUS 1.0 #define DEFAULT_CATMULLROM_RADIUS 2.0 #define DEFAULT_QUADRATIC_RADIUS 1.5 #define DEFAULT_QUADRATICBSPLINE_RADIUS 1.5 #define DEFAULT_CUBICCONVOLUTION_RADIUS 3.0 #define DEFAULT_GAUSSIAN_RADIUS 1.0 #define DEFAULT_HANNING_RADIUS 1.0 #define DEFAULT_HAMMING_RADIUS 1.0 #define DEFAULT_SINC_RADIUS 1.0 #define DEFAULT_WELSH_RADIUS 1.0 static double KernelBessel_J1(const double x) { double p, q; register long i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p = Pone[8]; q = Qone[8]; for (i=7; i >= 0; i--) { p = p*x*x+Pone[i]; q = q*x*x+Qone[i]; } return (double)(p/q); } static double KernelBessel_P1(const double x) { double p, q; register long i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Q1(const double x) { double p, q; register long i; static const double Pone[] = { 
0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Order1(double x) { double p, q; if (x == 0.0) return (0.0f); p = x; if (x < 0.0) x=(-x); if (x < 8.0) return (p*KernelBessel_J1(x)); q = (double)sqrt(2.0f/(M_PI*x))*(double)(KernelBessel_P1(x)*(1.0f/sqrt(2.0f)*(sin(x)-cos(x)))-8.0f/x*KernelBessel_Q1(x)* (-1.0f/sqrt(2.0f)*(sin(x)+cos(x)))); if (p < 0.0f) q = (-q); return (q); } static double filter_bessel(const double x) { if (x == 0.0f) return (double)(M_PI/4.0f); return (KernelBessel_Order1((double)M_PI*x)/(2.0f*x)); } static double filter_blackman(const double x) { return (0.42f+0.5f*(double)cos(M_PI*x)+0.08f*(double)cos(2.0f*M_PI*x)); } /** * Bicubic interpolation kernel (a=-1): \verbatim / | 1-2|t|**2+|t|**3 , if |t| < 1 h(t) = | 4-8|t|+5|t|**2-|t|**3 , if 1<=|t|<2 | 0 , otherwise \ \endverbatim * ***bd*** 2.2004 */ static double filter_bicubic(const double t) { const double abs_t = (double)fabs(t); const double abs_t_sq = abs_t * abs_t; if (abs_t<1) return 1-2*abs_t_sq+abs_t_sq*abs_t; if (abs_t<2) return 4 - 8*abs_t +5*abs_t_sq - abs_t_sq*abs_t; return 0; } /** * Generalized cubic kernel (for a=-1 it is the same as BicubicKernel): \verbatim / | (a+2)|t|**3 - (a+3)|t|**2 + 1 , |t| <= 1 h(t) = | a|t|**3 - 5a|t|**2 + 8a|t| - 4a , 1 < |t| <= 2 | 0 , otherwise \ \endverbatim * Often used values for a are -1 and -1/2. */ static double filter_generalized_cubic(const double t) { const double a = -DEFAULT_FILTER_GENERALIZED_CUBIC; double abs_t = (double)fabs(t); double abs_t_sq = abs_t * abs_t; if (abs_t < 1) return (a + 2) * abs_t_sq * abs_t - (a + 3) * abs_t_sq + 1; if (abs_t < 2) return a * abs_t_sq * abs_t - 5 * a * abs_t_sq + 8 * a * abs_t - 4 * a; return 0; } #ifdef FUNCTION_NOT_USED_YET /* CubicSpline filter, default radius 2 */ static double filter_cubic_spline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0 ) { const double x2 = x*x; return (0.5 * x2 * x - x2 + 2.0 / 3.0); } if (x < 2.0) { return (pow(2.0 - x, 3.0)/6.0); } return 0; } #endif #ifdef FUNCTION_NOT_USED_YET /* CubicConvolution filter, default radius 3 */ static double filter_cubic_convolution(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; const double x2 = x1 * x1; const double x2_x = x2 * x; if (x <= 1.0) return ((4.0 / 3.0)* x2_x - (7.0 / 3.0) * x2 + 1.0); if (x <= 2.0) return (- (7.0 / 12.0) * x2_x + 3 * x2 - (59.0 / 12.0) * x + 2.5); if (x <= 3.0) return ( (1.0/12.0) * x2_x - (2.0 / 3.0) * x2 + 1.75 * x - 1.5); return 0; } #endif static double filter_box(double x) { if (x < - DEFAULT_FILTER_BOX) return 0.0f; if (x < DEFAULT_FILTER_BOX) return 1.0f; return 0.0f; } static double filter_catmullrom(const double x) { if (x < -2.0) return(0.0f); if (x < -1.0) return(0.5f*(4.0f+x*(8.0f+x*(5.0f+x)))); if (x < 0.0) return(0.5f*(2.0f+x*x*(-5.0f-3.0f*x))); if (x < 1.0) return(0.5f*(2.0f+x*x*(-5.0f+3.0f*x))); if (x < 2.0) return(0.5f*(4.0f+x*(-8.0f+x*(5.0f-x)))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET static double filter_filter(double t) { /* f(t) = 2|t|^3 - 3|t|^2 + 1, -1 <= t <= 1 */ if(t < 0.0) t = -t; if(t < 1.0) return((2.0 * t - 3.0) * t * t + 1.0); return(0.0); } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos8 filter, default radius 8 */ static double filter_lanczos8(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS8_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI/ R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos3 filter, default radius 3 */ static double filter_lanczos3(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS3_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI / R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif /* Hermite filter, default radius 1 */ static double filter_hermite(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0) return ((2.0 * x - 3) * x * x + 1.0 ); return 0.0; } /* Trangle filter, default radius 1 */ static double filter_triangle(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0) return (1.0 - x); return 0.0; } /* Bell filter, default radius 1.5 */ static double filter_bell(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 0.5) return (0.75 - x*x); if (x < 1.5) return (0.5 * pow(x - 1.5, 2.0)); return 0.0; } /* Mitchell filter, default radius 2.0 */ static double filter_mitchell(const double x) { #define KM_B (1.0f/3.0f) #define KM_C (1.0f/3.0f) #define KM_P0 (( 6.0f - 2.0f * KM_B ) / 6.0f) #define KM_P2 ((-18.0f + 12.0f * KM_B + 6.0f * KM_C) / 6.0f) #define KM_P3 (( 12.0f - 9.0f * KM_B - 6.0f * KM_C) / 6.0f) #define KM_Q0 (( 8.0f * KM_B + 24.0f * KM_C) / 6.0f) #define KM_Q1 ((-12.0f * KM_B - 48.0f * KM_C) / 6.0f) #define KM_Q2 (( 6.0f * KM_B + 30.0f * KM_C) / 6.0f) #define KM_Q3 (( -1.0f * KM_B - 6.0f * KM_C) / 6.0f) if (x < -2.0) return(0.0f); if (x < -1.0) return(KM_Q0-x*(KM_Q1-x*(KM_Q2-x*KM_Q3))); if (x < 0.0f) return(KM_P0+x*x*(KM_P2-x*KM_P3)); if (x < 1.0f) return(KM_P0+x*x*(KM_P2+x*KM_P3)); if (x < 2.0f) return(KM_Q0+x*(KM_Q1+x*(KM_Q2+x*KM_Q3))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET /* Cosine filter, default radius 1 */ static double filter_cosine(const double x) { if ((x >= -1.0) && (x <= 1.0)) return ((cos(x * M_PI) + 1.0)/2.0); return 0; } #endif /* Quadratic filter, default radius 1.5 */ static double filter_quadratic(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; if (x <= 0.5) return (- 2.0 * x * x + 1); if (x <= 1.5) return (x * x - 2.5* x + 1.5); return 0.0; } static double filter_bspline(const double x) { if (x>2.0f) { return 0.0f; } else { double a, b, c, d; /* Was calculated anyway cause the "if((x-1.0f) < 0)" */ const double xm1 = x - 1.0f; const double xp1 = x + 1.0f; const double xp2 = x + 2.0f; if ((xp2) <= 0.0f) a = 0.0f; else a = xp2*xp2*xp2; if ((xp1) <= 0.0f) b = 0.0f; else b = xp1*xp1*xp1; if (x <= 0) c = 0.0f; else c = x*x*x; if ((xm1) <= 0.0f) d = 0.0f; else d = xm1*xm1*xm1; return (0.16666666666666666667f * (a - (4.0f * b) + (6.0f * c) - (4.0f * d))); } } #ifdef FUNCTION_NOT_USED_YET /* QuadraticBSpline filter, default radius 1.5 */ static double filter_quadratic_bspline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x <= 0.5) return (- x * x + 0.75); if (x <= 1.5) return (0.5 * x * x - 1.5 * x + 1.125); return 0.0; } #endif static double filter_gaussian(const double x) { /* return(exp((double) (-2.0 * x * x)) * sqrt(2.0 / M_PI)); */ return (double)(exp(-2.0f * x * x) * 0.79788456080287f); } static double filter_hanning(const double x) { /* A Cosine windowing function */ return(0.5 + 0.5 * cos(M_PI * x)); } static double filter_hamming(const double x) { /* should be (0.54+0.46*cos(M_PI*(double) x)); but this approximation is sufficient */ if (x < -1.0f) return 0.0f; if (x < 0.0f) return 0.92f*(-2.0f*x-3.0f)*x*x+1.0f; if (x < 1.0f) return 0.92f*(2.0f*x-3.0f)*x*x+1.0f; return 0.0f; } static double filter_power(const double x) { const double a = 2.0f; if (fabs(x)>1) return 0.0f; return (1.0f - (double)fabs(pow(x,a))); } static double filter_sinc(const double x) { /* X-scaled Sinc(x) function. */ if (x == 0.0) return(1.0); return (sin(M_PI * (double) x) / (M_PI * (double) x)); } #ifdef FUNCTION_NOT_USED_YET static double filter_welsh(const double x) { /* Welsh parabolic windowing filter */ if (x < 1.0) return(1 - x*x); return(0.0); } #endif #if defined(_MSC_VER) && !defined(inline) # define inline __inline #endif /* Copied from upstream's libgd */ static inline int _color_blend (const int dst, const int src) { const int src_alpha = gdTrueColorGetAlpha(src); if( src_alpha == gdAlphaOpaque ) { return src; } else { const int dst_alpha = gdTrueColorGetAlpha(dst); if( src_alpha == gdAlphaTransparent ) return dst; if( dst_alpha == gdAlphaTransparent ) { return src; } else { register int alpha, red, green, blue; const int src_weight = gdAlphaTransparent - src_alpha; const int dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax; const int tot_weight = src_weight + dst_weight; alpha = src_alpha * dst_alpha / gdAlphaMax; red = (gdTrueColorGetRed(src) * src_weight + gdTrueColorGetRed(dst) * dst_weight) / tot_weight; green = (gdTrueColorGetGreen(src) * src_weight + gdTrueColorGetGreen(dst) * dst_weight) / tot_weight; blue = (gdTrueColorGetBlue(src) * src_weight + gdTrueColorGetBlue(dst) * dst_weight) / tot_weight; return ((alpha << 24) + (red << 16) + (green << 8) + blue); } } } static inline int _setEdgePixel(const gdImagePtr src, unsigned int x, unsigned int y, gdFixed coverage, const int bgColor) { const gdFixed f_127 = gd_itofx(127); register int c = src->tpixels[y][x]; c = c | (( (int) (gd_fxtof(gd_mulfx(coverage, f_127)) + 50.5f)) << 24); return _color_blend(bgColor, c); } static inline int getPixelOverflowTC(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? 
gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return c; } else { register int border = 0; if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y > im->cy2) { if (x >= im->cx1 && x <= im->cx1) { border = im->tpixels[im->cy2][x]; goto processborder; } else { return gdTrueColorAlpha(0, 0, 0, 127); } } /* y is bound safe at this point */ if (x < im->cx1) { border = im->tpixels[y][im->cx1]; goto processborder; } if (x > im->cx2) { border = im->tpixels[y][im->cx2]; } processborder: if (border == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } else{ return gdTrueColorAlpha(gdTrueColorGetRed(border), gdTrueColorGetGreen(border), gdTrueColorGetBlue(border), 127); } } } #define colorIndex2RGBA(c) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(c)]) #define colorIndex2RGBcustomA(c, a) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(a)]) static inline int getPixelOverflowPalette(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->pixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return colorIndex2RGBA(c); } else { register int border = 0; if (y < im->cy1) { border = gdImageGetPixel(im, im->cx1, 0); goto processborder; } if (y < im->cy1) { border = gdImageGetPixel(im, im->cx1, 0); goto processborder; } if (y > im->cy2) { if (x >= im->cx1 && x <= im->cx1) { border = gdImageGetPixel(im, x, im->cy2); goto processborder; } else { return gdTrueColorAlpha(0, 0, 0, 127); } } /* y is bound safe at this point */ if (x < im->cx1) { border = gdImageGetPixel(im, im->cx1, y); goto processborder; } if (x > im->cx2) { border = gdImageGetPixel(im, im->cx2, y); } processborder: if (border == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } else{ return colorIndex2RGBcustomA(border, 127); } } } static int getPixelInterpolateWeight(gdImagePtr im, const double x, const double y, const int bgColor) { /* Closest pixel <= (xf,yf) */ int sx = (int)(x); int sy = (int)(y); const double xf = x - (double)sx; const double yf = y - (double)sy; const double nxf = (double) 1.0 - xf; const double nyf = (double) 1.0 - yf; const double m1 = xf * yf; const double m2 = nxf * yf; const double m3 = xf * nyf; const double m4 = nxf * nyf; /* get color values of neighbouring pixels */ const int c1 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy, bgColor) : getPixelOverflowPalette(im, sx, sy, bgColor); const int c2 = im->trueColor == 1 ? getPixelOverflowTC(im, sx - 1, sy, bgColor) : getPixelOverflowPalette(im, sx - 1, sy, bgColor); const int c3 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); const int c4 = im->trueColor == 1 ? 
getPixelOverflowTC(im, sx - 1, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); int r, g, b, a; if (x < 0) sx--; if (y < 0) sy--; /* component-wise summing-up of color values */ if (im->trueColor) { r = (int)(m1*gdTrueColorGetRed(c1) + m2*gdTrueColorGetRed(c2) + m3*gdTrueColorGetRed(c3) + m4*gdTrueColorGetRed(c4)); g = (int)(m1*gdTrueColorGetGreen(c1) + m2*gdTrueColorGetGreen(c2) + m3*gdTrueColorGetGreen(c3) + m4*gdTrueColorGetGreen(c4)); b = (int)(m1*gdTrueColorGetBlue(c1) + m2*gdTrueColorGetBlue(c2) + m3*gdTrueColorGetBlue(c3) + m4*gdTrueColorGetBlue(c4)); a = (int)(m1*gdTrueColorGetAlpha(c1) + m2*gdTrueColorGetAlpha(c2) + m3*gdTrueColorGetAlpha(c3) + m4*gdTrueColorGetAlpha(c4)); } else { r = (int)(m1*im->red[(c1)] + m2*im->red[(c2)] + m3*im->red[(c3)] + m4*im->red[(c4)]); g = (int)(m1*im->green[(c1)] + m2*im->green[(c2)] + m3*im->green[(c3)] + m4*im->green[(c4)]); b = (int)(m1*im->blue[(c1)] + m2*im->blue[(c2)] + m3*im->blue[(c3)] + m4*im->blue[(c4)]); a = (int)(m1*im->alpha[(c1)] + m2*im->alpha[(c2)] + m3*im->alpha[(c3)] + m4*im->alpha[(c4)]); } r = CLAMP(r, 0, 255); g = CLAMP(g, 0, 255); b = CLAMP(b, 0, 255); a = CLAMP(a, 0, gdAlphaMax); return gdTrueColorAlpha(r, g, b, a); } /** * InternalFunction: getPixelInterpolated * Returns the interpolated color value using the default interpolation * method. The returned color is always in the ARGB format (truecolor). * * Parameters: * im - Image to set the default interpolation method * y - X value of the ideal position * y - Y value of the ideal position * method - Interpolation method <gdInterpolationMethod> * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE * * See also: * <gdSetInterpolationMethod> */ int getPixelInterpolated(gdImagePtr im, const double x, const double y, const int bgColor) { const int xi=(int)((x) < 0 ? x - 1: x); const int yi=(int)((y) < 0 ? 
y - 1: y); int yii; int i; double kernel, kernel_cache_y; double kernel_x[12], kernel_y[4]; double new_r = 0.0f, new_g = 0.0f, new_b = 0.0f, new_a = 0.0f; /* These methods use special implementations */ if (im->interpolation_id == GD_BILINEAR_FIXED || im->interpolation_id == GD_BICUBIC_FIXED || im->interpolation_id == GD_NEAREST_NEIGHBOUR) { return -1; } if (im->interpolation_id == GD_WEIGHTED4) { return getPixelInterpolateWeight(im, x, y, bgColor); } if (im->interpolation_id == GD_NEAREST_NEIGHBOUR) { if (im->trueColor == 1) { return getPixelOverflowTC(im, xi, yi, bgColor); } else { return getPixelOverflowPalette(im, xi, yi, bgColor); } } if (im->interpolation) { for (i=0; i<4; i++) { kernel_x[i] = (double) im->interpolation((double)(xi+i-1-x)); kernel_y[i] = (double) im->interpolation((double)(yi+i-1-y)); } } else { return -1; } /* * TODO: use the known fast rgba multiplication implementation once * the new formats are in place */ for (yii = yi-1; yii < yi+3; yii++) { int xii; kernel_cache_y = kernel_y[yii-(yi-1)]; if (im->trueColor) { for (xii=xi-1; xii<xi+3; xii++) { const int rgbs = getPixelOverflowTC(im, xii, yii, bgColor); kernel = kernel_cache_y * kernel_x[xii-(xi-1)]; new_r += kernel * gdTrueColorGetRed(rgbs); new_g += kernel * gdTrueColorGetGreen(rgbs); new_b += kernel * gdTrueColorGetBlue(rgbs); new_a += kernel * gdTrueColorGetAlpha(rgbs); } } else { for (xii=xi-1; xii<xi+3; xii++) { const int rgbs = getPixelOverflowPalette(im, xii, yii, bgColor); kernel = kernel_cache_y * kernel_x[xii-(xi-1)]; new_r += kernel * gdTrueColorGetRed(rgbs); new_g += kernel * gdTrueColorGetGreen(rgbs); new_b += kernel * gdTrueColorGetBlue(rgbs); new_a += kernel * gdTrueColorGetAlpha(rgbs); } } } new_r = CLAMP(new_r, 0, 255); new_g = CLAMP(new_g, 0, 255); new_b = CLAMP(new_b, 0, 255); new_a = CLAMP(new_a, 0, gdAlphaMax); return gdTrueColorAlpha(((int)new_r), ((int)new_g), ((int)new_b), ((int)new_a)); } static inline LineContribType * _gdContributionsAlloc(unsigned int line_length, unsigned int windows_size) { unsigned int u = 0; LineContribType *res; res = (LineContribType *) gdMalloc(sizeof(LineContribType)); if (!res) { return NULL; } res->WindowSize = windows_size; res->LineLength = line_length; res->ContribRow = (ContributionType *) gdMalloc(line_length * sizeof(ContributionType)); for (u = 0 ; u < line_length ; u++) { res->ContribRow[u].Weights = (double *) gdMalloc(windows_size * sizeof(double)); } return res; } static inline void _gdContributionsFree(LineContribType * p) { unsigned int u; for (u = 0; u < p->LineLength; u++) { gdFree(p->ContribRow[u].Weights); } gdFree(p->ContribRow); gdFree(p); } static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter) { double width_d; double scale_f_d = 1.0; const double filter_width_d = DEFAULT_BOX_RADIUS; int windows_size; unsigned int u; LineContribType *res; if (scale_d < 1.0) { width_d = filter_width_d / scale_d; scale_f_d = scale_d; } else { width_d= filter_width_d; } windows_size = 2 * (int)ceil(width_d) + 1; res = _gdContributionsAlloc(line_size, windows_size); for (u = 0; u < line_size; u++) { const double dCenter = (double)u / scale_d; /* get the significant edge points affecting the pixel */ register int iLeft = MAX(0, (int)floor (dCenter - width_d)); int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1); double dTotalWeight = 0.0; int iSrc; /* Cut edge points to fit in filter window in case of spill-off */ if (iRight - iLeft + 1 > 
windows_size) { if (iLeft < ((int)src_size - 1 / 2)) { iLeft++; } else { iRight--; } } res->ContribRow[u].Left = iLeft; res->ContribRow[u].Right = iRight; for (iSrc = iLeft; iSrc <= iRight; iSrc++) { dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc))); } if (dTotalWeight < 0.0) { _gdContributionsFree(res); return NULL; } if (dTotalWeight > 0.0) { for (iSrc = iLeft; iSrc <= iRight; iSrc++) { res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight; } } } return res; } static inline void _gdScaleOneAxis(gdImagePtr pSrc, gdImagePtr dst, unsigned int dst_len, unsigned int row, LineContribType *contrib, gdAxis axis) { unsigned int ndx; for (ndx = 0; ndx < dst_len; ndx++) { double r = 0, g = 0, b = 0, a = 0; const int left = contrib->ContribRow[ndx].Left; const int right = contrib->ContribRow[ndx].Right; int *dest = (axis == HORIZONTAL) ? &dst->tpixels[row][ndx] : &dst->tpixels[ndx][row]; int i; /* Accumulate each channel */ for (i = left; i <= right; i++) { const int left_channel = i - left; const int srcpx = (axis == HORIZONTAL) ? pSrc->tpixels[row][i] : pSrc->tpixels[i][row]; r += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetRed(srcpx)); g += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetGreen(srcpx)); b += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetBlue(srcpx)); a += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetAlpha(srcpx)); }/* for */ *dest = gdTrueColorAlpha(uchar_clamp(r, 0xFF), uchar_clamp(g, 0xFF), uchar_clamp(b, 0xFF), uchar_clamp(a, 0x7F)); /* alpha is 0..127 */ }/* for */ }/* _gdScaleOneAxis*/ static inline int _gdScalePass(const gdImagePtr pSrc, const unsigned int src_len, const gdImagePtr pDst, const unsigned int dst_len, const unsigned int num_lines, const gdAxis axis) { unsigned int line_ndx; LineContribType * contrib; /* Same dim, just copy it. */ assert(dst_len != src_len); // TODO: caller should handle this. contrib = _gdContributionsCalc(dst_len, src_len, (double)dst_len / (double)src_len, pSrc->interpolation); if (contrib == NULL) { return 0; } /* Scale each line */ for (line_ndx = 0; line_ndx < num_lines; line_ndx++) { _gdScaleOneAxis(pSrc, pDst, dst_len, line_ndx, contrib, axis); } _gdContributionsFree (contrib); return 1; }/* _gdScalePass*/ static gdImagePtr gdImageScaleTwoPass(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { const unsigned int src_width = src->sx; const unsigned int src_height = src->sy; gdImagePtr tmp_im = NULL; gdImagePtr dst = NULL; /* First, handle the trivial case. */ if (src_width == new_width && src_height == new_height) { return gdImageClone(src); }/* if */ /* Convert to truecolor if it isn't; this code requires it. */ if (!src->trueColor) { gdImagePaletteToTrueColor(src); }/* if */ /* Scale horizontally unless sizes are the same. */ if (src_width == new_width) { tmp_im = src; } else { tmp_im = gdImageCreateTrueColor(new_width, src_height); if (tmp_im == NULL) { return NULL; } gdImageSetInterpolationMethod(tmp_im, src->interpolation_id); _gdScalePass(src, src_width, tmp_im, new_width, src_height, HORIZONTAL); }/* if .. else*/ /* If vertical sizes match, we're done. */ if (src_height == new_height) { assert(tmp_im != src); return tmp_im; }/* if */ /* Otherwise, we need to scale vertically. 
*/ dst = gdImageCreateTrueColor(new_width, new_height); if (dst != NULL) { gdImageSetInterpolationMethod(dst, src->interpolation_id); _gdScalePass(tmp_im, src_height, dst, new_height, new_width, VERTICAL); }/* if */ if (src != tmp_im) { gdImageDestroy(tmp_im); }/* if */ return dst; }/* gdImageScaleTwoPass*/ /* BilinearFixed, BicubicFixed and nearest implementations are rewamped versions of the implementation in CBitmapEx http://www.codeproject.com/Articles/29121/CBitmapEx-Free-C-Bitmap-Manipulation-Class Integer only implementation, good to have for common usages like pre scale very large images before using another interpolation methods for the last step. */ static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height) { const unsigned long new_width = MAX(1, width); const unsigned long new_height = MAX(1, height); const float dx = (float)im->sx / (float)new_width; const float dy = (float)im->sy / (float)new_height; const gdFixed f_dx = gd_ftofx(dx); const gdFixed f_dy = gd_ftofx(dy); gdImagePtr dst_img; unsigned long dst_offset_x; unsigned long dst_offset_y = 0; unsigned int i; dst_img = gdImageCreateTrueColor(new_width, new_height); if (dst_img == NULL) { return NULL; } for (i=0; i<new_height; i++) { unsigned int j; dst_offset_x = 0; if (im->trueColor) { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = im->tpixels[m][n]; } } else { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = colorIndex2RGBA(im->pixels[m][n]); } } dst_offset_y++; } return dst_img; } static inline int getPixelOverflowColorTC(gdImagePtr im, const int x, const int y, const int color) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } return c; } else { register int border = 0; if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y > im->cy2) { if (x >= im->cx1 && x <= im->cx1) { border = im->tpixels[im->cy2][x]; goto processborder; } else { return gdTrueColorAlpha(0, 0, 0, 127); } } /* y is bound safe at this point */ if (x < im->cx1) { border = im->tpixels[y][im->cx1]; goto processborder; } if (x > im->cx2) { border = im->tpixels[y][im->cx2]; } processborder: if (border == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } else{ return gdTrueColorAlpha(gdTrueColorGetRed(border), gdTrueColorGetGreen(border), gdTrueColorGetBlue(border), 127); } } } static gdImagePtr gdImageScaleBilinearPalette(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long _width = MAX(1, new_width); long _height = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)_width; float dy = (float)gdImageSY(im) / (float)_height; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int dst_offset_v = 0; long i; gdImagePtr new_img; const int transparent = im->transparent; new_img = gdImageCreateTrueColor(new_width, new_height); if (new_img == NULL) { 
return NULL; } new_img->transparent = gdTrueColorAlpha(im->red[transparent], im->green[transparent], im->blue[transparent], im->alpha[transparent]); for (i=0; i < _height; i++) { long j; const gdFixed f_i = gd_itofx(i); const gdFixed f_a = gd_mulfx(f_i, f_dy); register long m = gd_fxtoi(f_a); dst_offset_h = 0; for (j=0; j < _width; j++) { /* Update bitmap */ gdFixed f_j = gd_itofx(j); gdFixed f_b = gd_mulfx(f_j, f_dx); const long n = gd_fxtoi(f_b); gdFixed f_f = f_a - gd_itofx(m); gdFixed f_g = f_b - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); unsigned int pixel1; unsigned int pixel2; unsigned int pixel3; unsigned int pixel4; register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4; /* zero for the background color, nothig gets outside anyway */ pixel1 = getPixelOverflowPalette(im, n, m, 0); pixel2 = getPixelOverflowPalette(im, n + 1, m, 0); pixel3 = getPixelOverflowPalette(im, n, m + 1, 0); pixel4 = getPixelOverflowPalette(im, n + 1, m + 1, 0); f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); { const char red = (char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4)); const char green = (char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4)); const char blue = (char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4)); const char alpha = (char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4)); new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha); } dst_offset_h++; } dst_offset_v++; } return new_img; } static gdImagePtr gdImageScaleBilinearTC(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long dst_w = MAX(1, new_width); long dst_h = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)dst_w; float dy = (float)gdImageSY(im) / (float)dst_h; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int dst_offset_v = 0; long i; gdImagePtr new_img; new_img = gdImageCreateTrueColor(new_width, new_height); if (!new_img){ return NULL; } for (i=0; i < dst_h; i++) { long j; dst_offset_h = 0; for (j=0; j < dst_w; j++) { /* Update bitmap */ gdFixed f_i = gd_itofx(i); gdFixed f_j = gd_itofx(j); gdFixed f_a = gd_mulfx(f_i, f_dy); gdFixed f_b = gd_mulfx(f_j, f_dx); const gdFixed m = gd_fxtoi(f_a); const gdFixed n = gd_fxtoi(f_b); gdFixed f_f = f_a - gd_itofx(m); gdFixed f_g = f_b - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, 
f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); unsigned int pixel1; unsigned int pixel2; unsigned int pixel3; unsigned int pixel4; register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4; /* 0 for bgColor, nothing gets outside anyway */ pixel1 = getPixelOverflowTC(im, n, m, 0); pixel2 = getPixelOverflowTC(im, n + 1, m, 0); pixel3 = getPixelOverflowTC(im, n, m + 1, 0); pixel4 = getPixelOverflowTC(im, n + 1, m + 1, 0); f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); { const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4)); const unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4)); const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4)); const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4)); new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha); } dst_offset_h++; } dst_offset_v++; } return new_img; } static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { if (im->trueColor) { return gdImageScaleBilinearTC(im, new_width, new_height); } else { return gdImageScaleBilinearPalette(im, new_width, new_height); } } static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height) { const long new_width = MAX(1, width); const long new_height = MAX(1, height); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_dx = gd_ftofx((float)src_w / (float)new_width); const gdFixed f_dy = gd_ftofx((float)src_h / (float)new_height); const gdFixed f_1 = gd_itofx(1); const gdFixed f_2 = gd_itofx(2); const gdFixed f_4 = gd_itofx(4); const gdFixed f_6 = gd_itofx(6); const gdFixed f_gamma = gd_ftofx(1.04f); gdImagePtr dst; unsigned int dst_offset_x; unsigned int dst_offset_y = 0; long i; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. 
*/ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i=0; i < new_height; i++) { long j; dst_offset_x = 0; for (j=0; j < new_width; j++) { const gdFixed f_a = gd_mulfx(gd_itofx(i), f_dy); const gdFixed f_b = gd_mulfx(gd_itofx(j), f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); const gdFixed f_f = f_a - gd_itofx(m); const gdFixed f_g = f_b - gd_itofx(n); unsigned int src_offset_x[16], src_offset_y[16]; long k; register gdFixed f_red = 0, f_green = 0, f_blue = 0, f_alpha = 0; unsigned char red, green, blue, alpha = 0; int *dst_row = dst->tpixels[dst_offset_y]; if ((m < 1) || (n < 1)) { src_offset_x[0] = n; src_offset_y[0] = m; } else { src_offset_x[0] = n - 1; src_offset_y[0] = m; } if (m < 1) { src_offset_x[1] = n; src_offset_y[1] = m; } else { src_offset_x[1] = n; src_offset_y[1] = m; } if ((m < 1) || (n >= src_w - 1)) { src_offset_x[2] = n; src_offset_y[2] = m; } else { src_offset_x[2] = n + 1; src_offset_y[2] = m; } if ((m < 1) || (n >= src_w - 2)) { src_offset_x[3] = n; src_offset_y[3] = m; } else { src_offset_x[3] = n + 1 + 1; src_offset_y[3] = m; } if (n < 1) { src_offset_x[4] = n; src_offset_y[4] = m; } else { src_offset_x[4] = n - 1; src_offset_y[4] = m; } src_offset_x[5] = n; src_offset_y[5] = m; if (n >= src_w-1) { src_offset_x[6] = n; src_offset_y[6] = m; } else { src_offset_x[6] = n + 1; src_offset_y[6] = m; } if (n >= src_w - 2) { src_offset_x[7] = n; src_offset_y[7] = m; } else { src_offset_x[7] = n + 1 + 1; src_offset_y[7] = m; } if ((m >= src_h - 1) || (n < 1)) { src_offset_x[8] = n; src_offset_y[8] = m; } else { src_offset_x[8] = n - 1; src_offset_y[8] = m; } if (m >= src_h - 1) { src_offset_x[8] = n; src_offset_y[8] = m; } else { src_offset_x[9] = n; src_offset_y[9] = m; } if ((m >= src_h-1) || (n >= src_w-1)) { src_offset_x[10] = n; src_offset_y[10] = m; } else { src_offset_x[10] = n + 1; src_offset_y[10] = m; } if ((m >= src_h - 1) || (n >= src_w - 2)) { src_offset_x[11] = n; src_offset_y[11] = m; } else { src_offset_x[11] = n + 1 + 1; src_offset_y[11] = m; } if ((m >= src_h - 2) || (n < 1)) { src_offset_x[12] = n; src_offset_y[12] = m; } else { src_offset_x[12] = n - 1; src_offset_y[12] = m; } if (m >= src_h - 2) { src_offset_x[13] = n; src_offset_y[13] = m; } else { src_offset_x[13] = n; src_offset_y[13] = m; } if ((m >= src_h - 2) || (n >= src_w - 1)) { src_offset_x[14] = n; src_offset_y[14] = m; } else { src_offset_x[14] = n + 1; src_offset_y[14] = m; } if ((m >= src_h - 2) || (n >= src_w - 2)) { src_offset_x[15] = n; src_offset_y[15] = m; } else { src_offset_x[15] = n + 1 + 1; src_offset_y[15] = m; } for (k = -1; k < 3; k++) { const gdFixed f = gd_itofx(k)-f_f; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; register gdFixed f_a = 0, f_b = 0, f_d = 0, f_c = 0; register gdFixed f_RY; int l; if (f_fp2 > 0) f_a = gd_mulfx(f_fp2, gd_mulfx(f_fp2,f_fp2)); if (f_fp1 > 0) f_b = gd_mulfx(f_fp1, gd_mulfx(f_fp1,f_fp1)); if (f > 0) f_c = gd_mulfx(f, gd_mulfx(f,f)); if (f_fm1 > 0) f_d = gd_mulfx(f_fm1, gd_mulfx(f_fm1,f_fm1)); f_RY = gd_divfx((f_a - gd_mulfx(f_4,f_b) + gd_mulfx(f_6,f_c) - gd_mulfx(f_4,f_d)),f_6); for (l = -1; l < 3; l++) { const gdFixed f = gd_itofx(l) - f_g; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; register gdFixed f_a = 0, f_b = 0, f_c = 0, f_d = 0; register gdFixed f_RX, f_R, f_rs, f_gs, f_bs, f_ba; register int c; const int _k 
= ((k+1)*4) + (l+1); if (f_fp2 > 0) f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2)); if (f_fp1 > 0) f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1)); if (f > 0) f_c = gd_mulfx(f,gd_mulfx(f,f)); if (f_fm1 > 0) f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1)); f_RX = gd_divfx((f_a-gd_mulfx(f_4,f_b)+gd_mulfx(f_6,f_c)-gd_mulfx(f_4,f_d)),f_6); f_R = gd_mulfx(f_RY,f_RX); c = src->tpixels[*(src_offset_y + _k)][*(src_offset_x + _k)]; f_rs = gd_itofx(gdTrueColorGetRed(c)); f_gs = gd_itofx(gdTrueColorGetGreen(c)); f_bs = gd_itofx(gdTrueColorGetBlue(c)); f_ba = gd_itofx(gdTrueColorGetAlpha(c)); f_red += gd_mulfx(f_rs,f_R); f_green += gd_mulfx(f_gs,f_R); f_blue += gd_mulfx(f_bs,f_R); f_alpha += gd_mulfx(f_ba,f_R); } } red = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_red, f_gamma)), 0, 255); green = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_green, f_gamma)), 0, 255); blue = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_blue, f_gamma)), 0, 255); alpha = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_alpha, f_gamma)), 0, 127); *(dst_row + dst_offset_x) = gdTrueColorAlpha(red, green, blue, alpha); dst_offset_x++; } dst_offset_y++; } return dst; } BGD_DECLARE(gdImagePtr) gdImageScale(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { gdImagePtr im_scaled = NULL; if (src == NULL || src->interpolation_id < 0 || src->interpolation_id > GD_METHOD_COUNT) { return 0; } switch (src->interpolation_id) { /*Special cases, optimized implementations */ case GD_NEAREST_NEIGHBOUR: im_scaled = gdImageScaleNearestNeighbour(src, new_width, new_height); break; case GD_BILINEAR_FIXED: im_scaled = gdImageScaleBilinear(src, new_width, new_height); break; case GD_BICUBIC_FIXED: im_scaled = gdImageScaleBicubicFixed(src, new_width, new_height); break; /* generic */ default: if (src->interpolation == NULL) { return NULL; } im_scaled = gdImageScaleTwoPass(src, new_width, new_height); break; } return im_scaled; } static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const unsigned int new_width = (unsigned int)(abs((int)(src_w * cos(_angle))) + abs((int)(src_h * sin(_angle))) + 0.5f); const unsigned int new_height = (unsigned int)(abs((int)(src_w * sin(_angle))) + abs((int)(src_h * cos(_angle))) + 0.5f); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. 
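Note that gdImagePaletteToTrueColor converts the caller's image in place rather than working on a copy.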
*/ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j = 0; j < new_width; j++) { gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; long m = gd_fxtoi(f_m); long n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h-1) && (n > 0) && (n < src_w-1)) { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = src->tpixels[m][n]; } } else { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } } } dst_offset_y++; } return dst; } static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const unsigned int new_width = (unsigned int)(abs((int)(src_w * cos(_angle))) + abs((int)(src_h * sin(_angle))) + 0.5f); const unsigned int new_height = (unsigned int)(abs((int)(src_w * sin(_angle))) + abs((int)(src_h * cos(_angle))) + 0.5f); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; const gdFixed f_slop_y = f_sin; const gdFixed f_slop_x = f_cos; const gdFixed f_slop = f_slop_x > 0 && f_slop_x > 0 ? f_slop_x > f_slop_y ? gd_divfx(f_slop_y, f_slop_x) : gd_divfx(f_slop_x, f_slop_y) : 0; if (bgColor < 0) { return NULL; } /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. 
*/ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j = 0; j < new_width; j++) { gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; long m = gd_fxtoi(f_m); long n = gd_fxtoi(f_n); if ((n <= 0) || (m <= 0) || (m >= src_h) || (n >= src_w)) { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } else if ((n <= 1) || (m <= 1) || (m >= src_h - 1) || (n >= src_w - 1)) { register int c = getPixelInterpolated(src, n, m, bgColor); c = c | (( gdTrueColorGetAlpha(c) + ((int)(127* gd_fxtof(f_slop)))) << 24); dst->tpixels[dst_offset_y][dst_offset_x++] = _color_blend(bgColor, c); } else { dst->tpixels[dst_offset_y][dst_offset_x++] = getPixelInterpolated(src, n, m, bgColor); } } dst_offset_y++; } return dst; } static gdImagePtr gdImageRotateBilinear(gdImagePtr src, const float degrees, const int bgColor) { float _angle = (float)((- degrees / 180.0f) * M_PI); const unsigned int src_w = gdImageSX(src); const unsigned int src_h = gdImageSY(src); unsigned int new_width = abs((int)(src_w*cos(_angle))) + abs((int)(src_h*sin(_angle) + 0.5f)); unsigned int new_height = abs((int)(src_w*sin(_angle))) + abs((int)(src_h*cos(_angle) + 0.5f)); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); const gdFixed f_1 = gd_itofx(1); unsigned int i; unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int src_offset_x, src_offset_y; gdImagePtr dst; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. 
*/ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (dst == NULL) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j=0; j < new_width; j++) { const gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); const gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); const gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; const gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; const unsigned int m = gd_fxtoi(f_m); const unsigned int n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h - 1) && (n > 0) && (n < src_w - 1)) { const gdFixed f_f = f_m - gd_itofx(m); const gdFixed f_g = f_n - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); if (n < src_w - 1) { src_offset_x = n + 1; src_offset_y = m; } if (m < src_h - 1) { src_offset_x = n; src_offset_y = m + 1; } if (!((n >= src_w - 1) || (m >= src_h - 1))) { src_offset_x = n + 1; src_offset_y = m + 1; } { const int pixel1 = src->tpixels[src_offset_y][src_offset_x]; register int pixel2, pixel3, pixel4; if (src_offset_y + 1 >= src_h) { pixel2 = bgColor; pixel3 = bgColor; pixel4 = bgColor; } else if (src_offset_x + 1 >= src_w) { pixel2 = bgColor; pixel3 = bgColor; pixel4 = bgColor; } else { pixel2 = src->tpixels[src_offset_y][src_offset_x + 1]; pixel3 = src->tpixels[src_offset_y + 1][src_offset_x]; pixel4 = src->tpixels[src_offset_y + 1][src_offset_x + 1]; } { const gdFixed f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); const gdFixed f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); const gdFixed f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); const gdFixed f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); const gdFixed f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); const gdFixed f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); const gdFixed f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); const gdFixed f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); const gdFixed f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); const gdFixed f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); const gdFixed f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); const gdFixed f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); const gdFixed f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); const gdFixed f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); const gdFixed f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); const gdFixed f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); const gdFixed f_red = gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4); const gdFixed f_green = gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4); const gdFixed f_blue = gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4); const gdFixed f_alpha = gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4); const unsigned char red = (unsigned char) CLAMP(gd_fxtoi(f_red), 0, 255); const unsigned char green = (unsigned char) CLAMP(gd_fxtoi(f_green), 0, 255); const unsigned char blue = (unsigned char) CLAMP(gd_fxtoi(f_blue), 0, 255); const unsigned char alpha = (unsigned char) CLAMP(gd_fxtoi(f_alpha), 0, 127); dst->tpixels[dst_offset_y][dst_offset_x++] = gdTrueColorAlpha(red, green, blue, alpha); } } } else { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } } dst_offset_y++; } return dst; } static gdImagePtr 
gdImageRotateBicubicFixed(gdImagePtr src, const float degrees,const int bgColor) { const float _angle = (float)((- degrees / 180.0f) * M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const unsigned int new_width = abs((int)(src_w*cos(_angle))) + abs((int)(src_h*sin(_angle) + 0.5f)); const unsigned int new_height = abs((int)(src_w*sin(_angle))) + abs((int)(src_h*cos(_angle) + 0.5f)); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); const gdFixed f_1 = gd_itofx(1); const gdFixed f_2 = gd_itofx(2); const gdFixed f_4 = gd_itofx(4); const gdFixed f_6 = gd_itofx(6); const gdFixed f_gama = gd_ftofx(1.04f); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. */ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (dst == NULL) { return NULL; } dst->saveAlphaFlag = 1; for (i=0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j=0; j < new_width; j++) { const gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); const gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); const gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; const gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; const int m = gd_fxtoi(f_m); const int n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h - 1) && (n > 0) && (n < src_w-1)) { const gdFixed f_f = f_m - gd_itofx(m); const gdFixed f_g = f_n - gd_itofx(n); unsigned int src_offset_x[16], src_offset_y[16]; unsigned char red, green, blue, alpha; gdFixed f_red=0, f_green=0, f_blue=0, f_alpha=0; int k; if ((m < 1) || (n < 1)) { src_offset_x[0] = n; src_offset_y[0] = m; } else { src_offset_x[0] = n - 1; src_offset_y[0] = m; } if (m < 1) { src_offset_x[1] = n; src_offset_y[1] = m; } else { src_offset_x[1] = n; src_offset_y[1] = m ; } if ((m < 1) || (n >= src_w-1)) { src_offset_x[2] = - 1; src_offset_y[2] = - 1; } else { src_offset_x[2] = n + 1; src_offset_y[2] = m ; } if ((m < 1) || (n >= src_w-2)) { src_offset_x[3] = - 1; src_offset_y[3] = - 1; } else { src_offset_x[3] = n + 1 + 1; src_offset_y[3] = m ; } if (n < 1) { src_offset_x[4] = - 1; src_offset_y[4] = - 1; } else { src_offset_x[4] = n - 1; src_offset_y[4] = m; } src_offset_x[5] = n; src_offset_y[5] = m; if (n >= src_w-1) { src_offset_x[6] = - 1; src_offset_y[6] = - 1; } else { src_offset_x[6] = n + 1; src_offset_y[6] = m; } if (n >= src_w-2) { src_offset_x[7] = - 1; src_offset_y[7] = - 1; } else { src_offset_x[7] = n + 1 + 1; src_offset_y[7] = m; } if ((m >= src_h-1) || (n < 1)) { src_offset_x[8] = - 1; src_offset_y[8] = - 1; } else { src_offset_x[8] = n - 1; src_offset_y[8] = m; } if (m >= src_h-1) { src_offset_x[8] = - 1; src_offset_y[8] = - 1; } else { src_offset_x[9] = n; src_offset_y[9] = m; } if ((m >= src_h-1) || (n >= src_w-1)) { src_offset_x[10] = - 1; src_offset_y[10] = - 1; } else { src_offset_x[10] = n + 1; src_offset_y[10] = m; } if ((m >= src_h-1) || (n >= src_w-2)) { src_offset_x[11] = - 1; src_offset_y[11] = - 1; } else { src_offset_x[11] = n + 1 + 1; src_offset_y[11] = m; } if ((m >= src_h-2) || (n < 1)) { src_offset_x[12] = - 1; src_offset_y[12] = - 1; } else { src_offset_x[12] = n - 1; src_offset_y[12] = m; } if (m >= src_h-2) { src_offset_x[13] = - 1; src_offset_y[13] = - 
1; } else { src_offset_x[13] = n; src_offset_y[13] = m; } if ((m >= src_h-2) || (n >= src_w - 1)) { src_offset_x[14] = - 1; src_offset_y[14] = - 1; } else { src_offset_x[14] = n + 1; src_offset_y[14] = m; } if ((m >= src_h-2) || (n >= src_w-2)) { src_offset_x[15] = - 1; src_offset_y[15] = - 1; } else { src_offset_x[15] = n + 1 + 1; src_offset_y[15] = m; } for (k=-1; k<3; k++) { const gdFixed f = gd_itofx(k)-f_f; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; gdFixed f_a = 0, f_b = 0,f_c = 0, f_d = 0; gdFixed f_RY; int l; if (f_fp2 > 0) { f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2)); } if (f_fp1 > 0) { f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1)); } if (f > 0) { f_c = gd_mulfx(f,gd_mulfx(f,f)); } if (f_fm1 > 0) { f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1)); } f_RY = gd_divfx((f_a-gd_mulfx(f_4,f_b)+gd_mulfx(f_6,f_c)-gd_mulfx(f_4,f_d)),f_6); for (l=-1; l< 3; l++) { const gdFixed f = gd_itofx(l) - f_g; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; gdFixed f_a = 0, f_b = 0, f_c = 0, f_d = 0; gdFixed f_RX, f_R; const int _k = ((k + 1) * 4) + (l + 1); register gdFixed f_rs, f_gs, f_bs, f_as; register int c; if (f_fp2 > 0) { f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2)); } if (f_fp1 > 0) { f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1)); } if (f > 0) { f_c = gd_mulfx(f,gd_mulfx(f,f)); } if (f_fm1 > 0) { f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1)); } f_RX = gd_divfx((f_a - gd_mulfx(f_4, f_b) + gd_mulfx(f_6, f_c) - gd_mulfx(f_4, f_d)), f_6); f_R = gd_mulfx(f_RY, f_RX); if ((src_offset_x[_k] <= 0) || (src_offset_y[_k] <= 0) || (src_offset_y[_k] >= src_h) || (src_offset_x[_k] >= src_w)) { c = bgColor; } else if ((src_offset_x[_k] <= 1) || (src_offset_y[_k] <= 1) || (src_offset_y[_k] >= (int)src_h - 1) || (src_offset_x[_k] >= (int)src_w - 1)) { gdFixed f_127 = gd_itofx(127); c = src->tpixels[src_offset_y[_k]][src_offset_x[_k]]; c = c | (( (int) (gd_fxtof(gd_mulfx(f_R, f_127)) + 50.5f)) << 24); c = _color_blend(bgColor, c); } else { c = src->tpixels[src_offset_y[_k]][src_offset_x[_k]]; } f_rs = gd_itofx(gdTrueColorGetRed(c)); f_gs = gd_itofx(gdTrueColorGetGreen(c)); f_bs = gd_itofx(gdTrueColorGetBlue(c)); f_as = gd_itofx(gdTrueColorGetAlpha(c)); f_red += gd_mulfx(f_rs, f_R); f_green += gd_mulfx(f_gs, f_R); f_blue += gd_mulfx(f_bs, f_R); f_alpha += gd_mulfx(f_as, f_R); } } red = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_red, f_gama)), 0, 255); green = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_green, f_gama)), 0, 255); blue = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_blue, f_gama)), 0, 255); alpha = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_alpha, f_gama)), 0, 127); dst->tpixels[dst_offset_y][dst_offset_x] = gdTrueColorAlpha(red, green, blue, alpha); } else { dst->tpixels[dst_offset_y][dst_offset_x] = bgColor; } dst_offset_x++; } dst_offset_y++; } return dst; } BGD_DECLARE(gdImagePtr) gdImageRotateInterpolated(const gdImagePtr src, const float angle, int bgcolor) { /* round to two decimals and keep the 100x multiplication to use it in the common square angles case later. Keep the two decimal precisions so smaller rotation steps can be done, useful for slow animations, f.e. */ const int angle_rounded = fmod((int) floorf(angle * 100), 360 * 100); if (bgcolor < 0) { return NULL; } /* 0 && 90 degrees multiple rotation, 0 rotation simply clones the return image and convert it to truecolor, as we must return truecolor image. 
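For example, angle = 90.0 yields angle_rounded = 9000 and takes the gdImageRotate90() fast path below; only exact multiples of 90 degrees hit these cases.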
*/ switch (angle_rounded) { case 0: { gdImagePtr dst = gdImageClone(src); if (dst == NULL) { return NULL; } if (dst->trueColor == 0) { gdImagePaletteToTrueColor(dst); } return dst; } case -27000: case 9000: return gdImageRotate90(src, 0); case -18000: case 18000: return gdImageRotate180(src, 0); case -9000: case 27000: return gdImageRotate270(src, 0); } if (src == NULL || src->interpolation_id < 1 || src->interpolation_id > GD_METHOD_COUNT) { return NULL; } switch (src->interpolation_id) { case GD_NEAREST_NEIGHBOUR: return gdImageRotateNearestNeighbour(src, angle, bgcolor); break; case GD_BILINEAR_FIXED: return gdImageRotateBilinear(src, angle, bgcolor); break; case GD_BICUBIC_FIXED: return gdImageRotateBicubicFixed(src, angle, bgcolor); break; default: return gdImageRotateGeneric(src, angle, bgcolor); } return NULL; } /** * Title: Affine transformation **/ /** * Group: Transform **/ static void gdImageClipRectangle(gdImagePtr im, gdRectPtr r) { int c1x, c1y, c2x, c2y; int x1,y1; gdImageGetClip(im, &c1x, &c1y, &c2x, &c2y); x1 = r->x + r->width - 1; y1 = r->y + r->height - 1; r->x = CLAMP(r->x, c1x, c2x); r->y = CLAMP(r->y, c1y, c2y); r->width = CLAMP(x1, c1x, c2x) - r->x + 1; r->height = CLAMP(y1, c1y, c2y) - r->y + 1; } void gdDumpRect(const char *msg, gdRectPtr r) { printf("%s (%i, %i) (%i, %i)\n", msg, r->x, r->y, r->width, r->height); } /** * Function: gdTransformAffineGetImage * Applies an affine transformation to a region and return an image * containing the complete transformation. * * Parameters: * dst - Pointer to a gdImagePtr to store the created image, NULL when * the creation or the transformation failed * src - Source image * src_area - rectangle defining the source region to transform * dstY - Y position in the destination image * affine - The desired affine transformation * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]) { int res; double m[6]; gdRect bbox; gdRect area_full; if (src_area == NULL) { area_full.x = 0; area_full.y = 0; area_full.width = gdImageSX(src); area_full.height = gdImageSY(src); src_area = &area_full; } gdTransformAffineBoundingBox(src_area, affine, &bbox); *dst = gdImageCreateTrueColor(bbox.width, bbox.height); if (*dst == NULL) { return GD_FALSE; } (*dst)->saveAlphaFlag = 1; if (!src->trueColor) { gdImagePaletteToTrueColor(src); } /* Translate to dst origin (0,0) */ gdAffineTranslate(m, -bbox.x, -bbox.y); gdAffineConcat(m, affine, m); gdImageAlphaBlending(*dst, 0); res = gdTransformAffineCopy(*dst, 0,0, src, src_area, m); if (res != GD_TRUE) { gdImageDestroy(*dst); dst = NULL; return GD_FALSE; } else { return GD_TRUE; } } /** * Function: gdTransformAffineCopy * Applies an affine transformation to a region and copy the result * in a destination to the given position. 
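* Note: the source region is clipped against the source image's current clipping rectangle before the transform is applied, and the clip rectangle is restored before returning if it had to be narrowed.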
* * Parameters: * dst - Image to draw the transformed image * src - Source image * dstX - X position in the destination image * dstY - Y position in the destination image * src_area - Rectangular region to rotate in the src image * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineCopy(gdImagePtr dst, int dst_x, int dst_y, const gdImagePtr src, gdRectPtr src_region, const double affine[6]) { int c1x,c1y,c2x,c2y; int backclip = 0; int backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2; register int x, y, src_offset_x, src_offset_y; double inv[6]; int *dst_p; gdPointF pt, src_pt; gdRect bbox; int end_x, end_y; gdInterpolationMethod interpolation_id_bak = GD_DEFAULT; /* These methods use special implementations */ if (src->interpolation_id == GD_BILINEAR_FIXED || src->interpolation_id == GD_BICUBIC_FIXED || src->interpolation_id == GD_NEAREST_NEIGHBOUR) { interpolation_id_bak = src->interpolation_id; gdImageSetInterpolationMethod(src, GD_BICUBIC); } gdImageClipRectangle(src, src_region); if (src_region->x > 0 || src_region->y > 0 || src_region->width < gdImageSX(src) || src_region->height < gdImageSY(src)) { backclip = 1; gdImageGetClip(src, &backup_clipx1, &backup_clipy1, &backup_clipx2, &backup_clipy2); gdImageSetClip(src, src_region->x, src_region->y, src_region->x + src_region->width - 1, src_region->y + src_region->height - 1); } if (!gdTransformAffineBoundingBox(src_region, affine, &bbox)) { if (backclip) { gdImageSetClip(src, backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2); } gdImageSetInterpolationMethod(src, interpolation_id_bak); return GD_FALSE; } gdImageGetClip(dst, &c1x, &c1y, &c2x, &c2y); end_x = bbox.width + (int) fabs(bbox.x); end_y = bbox.height + (int) fabs(bbox.y); /* Get inverse affine to let us work with destination -> source */ gdAffineInvert(inv, affine); src_offset_x = src_region->x; src_offset_y = src_region->y; if (dst->alphaBlendingFlag) { for (y = bbox.y; y <= end_y; y++) { pt.y = y + 0.5; for (x = 0; x <= end_x; x++) { pt.x = x + 0.5; gdAffineApplyToPointF(&src_pt, &pt, inv); gdImageSetPixel(dst, dst_x + x, dst_y + y, getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, 0)); } } } else { for (y = 0; y <= end_y; y++) { pt.y = y + 0.5 + bbox.y; if ((dst_y + y) < 0 || ((dst_y + y) > gdImageSY(dst) -1)) { continue; } dst_p = dst->tpixels[dst_y + y] + dst_x; for (x = 0; x <= end_x; x++) { pt.x = x + 0.5 + bbox.x; gdAffineApplyToPointF(&src_pt, &pt, inv); if ((dst_x + x) < 0 || (dst_x + x) > (gdImageSX(dst) - 1)) { break; } *(dst_p++) = getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, -1); } } } /* Restore clip if required */ if (backclip) { gdImageSetClip(src, backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2); } gdImageSetInterpolationMethod(src, interpolation_id_bak); return GD_TRUE; } /** * Function: gdTransformAffineBoundingBox * Returns the bounding box of an affine transformation applied to a * rectangular area <gdRect> * * Parameters: * src - Rectangular source area for the affine transformation * affine - the affine transformation * bbox - the resulting bounding box * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineBoundingBox(gdRectPtr src, const double affine[6], gdRectPtr bbox) { gdPointF extent[4], min, max, point; int i; extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) src->width; extent[1].y=0.0; extent[2].x=(double) src->width; extent[2].y=(double) src->height; 
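/* The four corners of the source rectangle are collected in extent[]; each corner is passed through the affine map below, and the min/max of the mapped corners define the bounding box. */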
extent[3].x=0.0; extent[3].y=(double) src->height; for (i=0; i < 4; i++) { point=extent[i]; if (gdAffineApplyToPointF(&extent[i], &point, affine) != GD_TRUE) { return GD_FALSE; } } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } bbox->x = (int) min.x; bbox->y = (int) min.y; bbox->width = (int) floor(max.x - min.x) - 1; bbox->height = (int) floor(max.y - min.y); return GD_TRUE; } BGD_DECLARE(int) gdImageSetInterpolationMethod(gdImagePtr im, gdInterpolationMethod id) { if (im == NULL || id < 0 || id > GD_METHOD_COUNT) { return 0; } switch (id) { case GD_DEFAULT: id = GD_BILINEAR_FIXED; /* Optimized versions */ case GD_BILINEAR_FIXED: case GD_BICUBIC_FIXED: case GD_NEAREST_NEIGHBOUR: case GD_WEIGHTED4: im->interpolation = NULL; break; /* generic versions*/ case GD_BELL: im->interpolation = filter_bell; break; case GD_BESSEL: im->interpolation = filter_bessel; break; case GD_BICUBIC: im->interpolation = filter_bicubic; break; case GD_BLACKMAN: im->interpolation = filter_blackman; break; case GD_BOX: im->interpolation = filter_box; break; case GD_BSPLINE: im->interpolation = filter_bspline; break; case GD_CATMULLROM: im->interpolation = filter_catmullrom; break; case GD_GAUSSIAN: im->interpolation = filter_gaussian; break; case GD_GENERALIZED_CUBIC: im->interpolation = filter_generalized_cubic; break; case GD_HERMITE: im->interpolation = filter_hermite; break; case GD_HAMMING: im->interpolation = filter_hamming; break; case GD_HANNING: im->interpolation = filter_hanning; break; case GD_MITCHELL: im->interpolation = filter_mitchell; break; case GD_POWER: im->interpolation = filter_power; break; case GD_QUADRATIC: im->interpolation = filter_quadratic; break; case GD_SINC: im->interpolation = filter_sinc; break; case GD_TRIANGLE: im->interpolation = filter_triangle; break; default: return 0; break; } im->interpolation_id = id; return 1; } /* Return the interpolation mode set in 'im'. This is here so that * the value can be read via a language or VM with an FFI but no * (portable) way to extract the value from the struct. */ BGD_DECLARE(gdInterpolationMethod) gdImageGetInterpolationMethod(gdImagePtr im) { return im->interpolation_id; } #ifdef _MSC_VER # pragma optimize("", on) #endif
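/*
 * Illustrative usage sketch (not part of the library build): scaling and then
 * rotating an image with an explicitly chosen interpolation method. The image
 * variable and the chosen sizes/angle are placeholders, and error handling is
 * kept minimal.
 *
 *   gdImagePtr im = ...;  // e.g. from one of the gdImageCreateFrom*() loaders
 *   gdImageSetInterpolationMethod(im, GD_BICUBIC);
 *   gdImagePtr scaled = gdImageScale(im, 640, 480);
 *   if (scaled != NULL) {
 *       // bgcolor must be a non-negative truecolor value; fully transparent here
 *       gdImagePtr rotated = gdImageRotateInterpolated(scaled, 45.0f,
 *                                                      gdTrueColorAlpha(0, 0, 0, 127));
 *       if (rotated != NULL) {
 *           // ... use rotated ...
 *           gdImageDestroy(rotated);
 *       }
 *       gdImageDestroy(scaled);
 *   }
 *   gdImageDestroy(im);
 */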
/* * Copyright (C) 2009 Red Hat, Inc. * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/mmu_notifier.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/mm_inline.h> #include <linux/kthread.h> #include <linux/khugepaged.h> #include <linux/freezer.h> #include <linux/mman.h> #include <asm/tlb.h> #include <asm/pgalloc.h> #include "internal.h" /* * By default transparent hugepage support is enabled for all mappings * and khugepaged scans all mappings. Defrag is only invoked by * khugepaged hugepage allocations and by page faults inside * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived * allocations. */ unsigned long transparent_hugepage_flags __read_mostly = #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS (1<<TRANSPARENT_HUGEPAGE_FLAG)| #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| #endif (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)| (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8; static unsigned int khugepaged_pages_collapsed; static unsigned int khugepaged_full_scans; static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; /* during fragmentation poll the hugepage allocator once every minute */ static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; static struct task_struct *khugepaged_thread __read_mostly; static DEFINE_MUTEX(khugepaged_mutex); static DEFINE_SPINLOCK(khugepaged_mm_lock); static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); /* * default collapse hugepages if there is at least one pte mapped like * it would have happened if the vma was large enough during page * fault. */ static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1; static int khugepaged(void *none); static int mm_slots_hash_init(void); static int khugepaged_slab_init(void); static void khugepaged_slab_free(void); #define MM_SLOTS_HASH_HEADS 1024 static struct hlist_head *mm_slots_hash __read_mostly; static struct kmem_cache *mm_slot_cache __read_mostly; /** * struct mm_slot - hash lookup from mm to mm_slot * @hash: hash collision list * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head * @mm: the mm that this information is valid for */ struct mm_slot { struct hlist_node hash; struct list_head mm_node; struct mm_struct *mm; }; /** * struct khugepaged_scan - cursor for scanning * @mm_head: the head of the mm list to scan * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * * There is only the one khugepaged_scan instance of this cursor structure. 
*/ struct khugepaged_scan { struct list_head mm_head; struct mm_slot *mm_slot; unsigned long address; } khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; static int set_recommended_min_free_kbytes(void) { struct zone *zone; int nr_zones = 0; unsigned long recommended_min; extern int min_free_kbytes; if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags) && !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) return 0; for_each_populated_zone(zone) nr_zones++; /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */ recommended_min = pageblock_nr_pages * nr_zones * 2; /* * Make sure that on average at least two pageblocks are almost free * of another type, one for a migratetype to fall back to and a * second to avoid subsequent fallbacks of other types There are 3 * MIGRATE_TYPES we care about. */ recommended_min += pageblock_nr_pages * nr_zones * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; /* don't ever allow to reserve more than 5% of the lowmem */ recommended_min = min(recommended_min, (unsigned long) nr_free_buffer_pages() / 20); recommended_min <<= (PAGE_SHIFT-10); if (recommended_min > min_free_kbytes) min_free_kbytes = recommended_min; setup_per_zone_wmarks(); return 0; } late_initcall(set_recommended_min_free_kbytes); static int start_khugepaged(void) { int err = 0; if (khugepaged_enabled()) { int wakeup; if (unlikely(!mm_slot_cache || !mm_slots_hash)) { err = -ENOMEM; goto out; } mutex_lock(&khugepaged_mutex); if (!khugepaged_thread) khugepaged_thread = kthread_run(khugepaged, NULL, "khugepaged"); if (unlikely(IS_ERR(khugepaged_thread))) { printk(KERN_ERR "khugepaged: kthread_run(khugepaged) failed\n"); err = PTR_ERR(khugepaged_thread); khugepaged_thread = NULL; } wakeup = !list_empty(&khugepaged_scan.mm_head); mutex_unlock(&khugepaged_mutex); if (wakeup) wake_up_interruptible(&khugepaged_wait); set_recommended_min_free_kbytes(); } else /* wakeup to exit */ wake_up_interruptible(&khugepaged_wait); out: return err; } #ifdef CONFIG_SYSFS static ssize_t double_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag enabled, enum transparent_hugepage_flag req_madv) { if (test_bit(enabled, &transparent_hugepage_flags)) { VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); return sprintf(buf, "[always] madvise never\n"); } else if (test_bit(req_madv, &transparent_hugepage_flags)) return sprintf(buf, "always [madvise] never\n"); else return sprintf(buf, "always madvise [never]\n"); } static ssize_t double_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag enabled, enum transparent_hugepage_flag req_madv) { if (!memcmp("always", buf, min(sizeof("always")-1, count))) { set_bit(enabled, &transparent_hugepage_flags); clear_bit(req_madv, &transparent_hugepage_flags); } else if (!memcmp("madvise", buf, min(sizeof("madvise")-1, count))) { clear_bit(enabled, &transparent_hugepage_flags); set_bit(req_madv, &transparent_hugepage_flags); } else if (!memcmp("never", buf, min(sizeof("never")-1, count))) { clear_bit(enabled, &transparent_hugepage_flags); clear_bit(req_madv, &transparent_hugepage_flags); } else return -EINVAL; return count; } static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return double_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); } static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute 
*attr, const char *buf, size_t count) { ssize_t ret; ret = double_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); if (ret > 0) { int err = start_khugepaged(); if (err) ret = err; } if (ret > 0 && (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags) || test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))) set_recommended_min_free_kbytes(); return ret; } static struct kobj_attribute enabled_attr = __ATTR(enabled, 0644, enabled_show, enabled_store); static ssize_t single_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { return sprintf(buf, "%d\n", !!test_bit(flag, &transparent_hugepage_flags)); } static ssize_t single_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) { unsigned long value; int ret; ret = kstrtoul(buf, 10, &value); if (ret < 0) return ret; if (value > 1) return -EINVAL; if (value) set_bit(flag, &transparent_hugepage_flags); else clear_bit(flag, &transparent_hugepage_flags); return count; } /* * Currently defrag only disables __GFP_NOWAIT for allocation. A blind * __GFP_REPEAT is too aggressive, it's never worth swapping tons of * memory just to allocate one more hugepage. */ static ssize_t defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return double_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); } static ssize_t defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return double_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); } static struct kobj_attribute defrag_attr = __ATTR(defrag, 0644, defrag_show, defrag_store); #ifdef CONFIG_DEBUG_VM static ssize_t debug_cow_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static ssize_t debug_cow_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static struct kobj_attribute debug_cow_attr = __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); #endif /* CONFIG_DEBUG_VM */ static struct attribute *hugepage_attr[] = { &enabled_attr.attr, &defrag_attr.attr, #ifdef CONFIG_DEBUG_VM &debug_cow_attr.attr, #endif NULL, }; static struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); } static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; khugepaged_scan_sleep_millisecs = msecs; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute scan_sleep_millisecs_attr = __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, scan_sleep_millisecs_store); static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); } static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute 
*attr, const char *buf, size_t count) { unsigned long msecs; int err; err = strict_strtoul(buf, 10, &msecs); if (err || msecs > UINT_MAX) return -EINVAL; khugepaged_alloc_sleep_millisecs = msecs; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute alloc_sleep_millisecs_attr = __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, alloc_sleep_millisecs_store); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long pages; err = strict_strtoul(buf, 10, &pages); if (err || !pages || pages > UINT_MAX) return -EINVAL; khugepaged_pages_to_scan = pages; return count; } static struct kobj_attribute pages_to_scan_attr = __ATTR(pages_to_scan, 0644, pages_to_scan_show, pages_to_scan_store); static ssize_t pages_collapsed_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_pages_collapsed); } static struct kobj_attribute pages_collapsed_attr = __ATTR_RO(pages_collapsed); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_full_scans); } static struct kobj_attribute full_scans_attr = __ATTR_RO(full_scans); static ssize_t khugepaged_defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static ssize_t khugepaged_defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static struct kobj_attribute khugepaged_defrag_attr = __ATTR(defrag, 0644, khugepaged_defrag_show, khugepaged_defrag_store); /* * max_ptes_none controls if khugepaged should collapse hugepages over * any unmapped ptes in turn potentially increasing the memory * footprint of the vmas. When max_ptes_none is 0 khugepaged will not * reduce the available free memory in the system as it * runs. Increasing max_ptes_none will instead potentially reduce the * free memory in the system during the khugepaged scan. 
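* For example, with the default value of HPAGE_PMD_NR-1 (511 with 4KB base pages), a huge page sized range can be collapsed even when only a single pte in it is currently mapped.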
*/ static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", khugepaged_max_ptes_none); } static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long max_ptes_none; err = strict_strtoul(buf, 10, &max_ptes_none); if (err || max_ptes_none > HPAGE_PMD_NR-1) return -EINVAL; khugepaged_max_ptes_none = max_ptes_none; return count; } static struct kobj_attribute khugepaged_max_ptes_none_attr = __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, khugepaged_max_ptes_none_store); static struct attribute *khugepaged_attr[] = { &khugepaged_defrag_attr.attr, &khugepaged_max_ptes_none_attr.attr, &pages_to_scan_attr.attr, &pages_collapsed_attr.attr, &full_scans_attr.attr, &scan_sleep_millisecs_attr.attr, &alloc_sleep_millisecs_attr.attr, NULL, }; static struct attribute_group khugepaged_attr_group = { .attrs = khugepaged_attr, .name = "khugepaged", }; #endif /* CONFIG_SYSFS */ static int __init hugepage_init(void) { int err; #ifdef CONFIG_SYSFS static struct kobject *hugepage_kobj; #endif err = -EINVAL; if (!has_transparent_hugepage()) { transparent_hugepage_flags = 0; goto out; } #ifdef CONFIG_SYSFS err = -ENOMEM; hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!hugepage_kobj)) { printk(KERN_ERR "hugepage: failed kobject create\n"); goto out; } err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group); if (err) { printk(KERN_ERR "hugepage: failed register hugeage group\n"); goto out; } err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group); if (err) { printk(KERN_ERR "hugepage: failed register hugeage group\n"); goto out; } #endif err = khugepaged_slab_init(); if (err) goto out; err = mm_slots_hash_init(); if (err) { khugepaged_slab_free(); goto out; } /* * By default disable transparent hugepages on smaller systems, * where the extra memory used could hurt more than TLB overhead * is likely to save. The admin can still enable it through /sys. 
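* (The check below corresponds to roughly 512MB of RAM: totalram_pages is compared against 512 << (20 - PAGE_SHIFT) pages.)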
*/ if (totalram_pages < (512 << (20 - PAGE_SHIFT))) transparent_hugepage_flags = 0; start_khugepaged(); set_recommended_min_free_kbytes(); out: return err; } module_init(hugepage_init) static int __init setup_transparent_hugepage(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "always")) { set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "never")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } out: if (!ret) printk(KERN_WARNING "transparent_hugepage= cannot parse, ignored\n"); return ret; } __setup("transparent_hugepage=", setup_transparent_hugepage); static void prepare_pmd_huge_pte(pgtable_t pgtable, struct mm_struct *mm) { assert_spin_locked(&mm->page_table_lock); /* FIFO */ if (!mm->pmd_huge_pte) INIT_LIST_HEAD(&pgtable->lru); else list_add(&pgtable->lru, &mm->pmd_huge_pte->lru); mm->pmd_huge_pte = pgtable; } static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) pmd = pmd_mkwrite(pmd); return pmd; } static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page) { int ret = 0; pgtable_t pgtable; VM_BUG_ON(!PageCompound(page)); pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) { mem_cgroup_uncharge_page(page); put_page(page); return VM_FAULT_OOM; } clear_huge_page(page, haddr, HPAGE_PMD_NR); __SetPageUptodate(page); spin_lock(&mm->page_table_lock); if (unlikely(!pmd_none(*pmd))) { spin_unlock(&mm->page_table_lock); mem_cgroup_uncharge_page(page); put_page(page); pte_free(mm, pgtable); } else { pmd_t entry; entry = mk_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); /* * The spinlocking to take the lru_lock inside * page_add_new_anon_rmap() acts as a full memory * barrier to be sure clear_huge_page writes become * visible after the set_pmd_at() write. */ page_add_new_anon_rmap(page, vma, haddr); set_pmd_at(mm, haddr, pmd, entry); prepare_pmd_huge_pte(pgtable, mm); add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); spin_unlock(&mm->page_table_lock); } return ret; } static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) { return (GFP_TRANSHUGE & ~(defrag ? 
0 : __GFP_WAIT)) | extra_gfp; } static inline struct page *alloc_hugepage_vma(int defrag, struct vm_area_struct *vma, unsigned long haddr, int nd, gfp_t extra_gfp) { return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp), HPAGE_PMD_ORDER, vma, haddr, nd); } #ifndef CONFIG_NUMA static inline struct page *alloc_hugepage(int defrag) { return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER); } #endif int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) { struct page *page; unsigned long haddr = address & HPAGE_PMD_MASK; pte_t *pte; if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; if (unlikely(khugepaged_enter(vma))) return VM_FAULT_OOM; page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma, haddr, numa_node_id(), 0); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); goto out; } count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { put_page(page); goto out; } return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page); } out: /* * Use __pte_alloc instead of pte_alloc_map, because we can't * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. */ if (unlikely(__pte_alloc(mm, vma, pmd, address))) return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) return 0; /* * A regular pmd is established and it can't morph into a huge pmd * from under us anymore at this point because we hold the mmap_sem * read mode and khugepaged takes it in write mode. So now it's * safe to run pte_offset_map(). */ pte = pte_offset_map(pmd, address); return handle_pte_fault(mm, vma, address, pte, pmd, flags); } int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) { struct page *src_page; pmd_t pmd; pgtable_t pgtable; int ret; ret = -ENOMEM; pgtable = pte_alloc_one(dst_mm, addr); if (unlikely(!pgtable)) goto out; spin_lock(&dst_mm->page_table_lock); spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING); ret = -EAGAIN; pmd = *src_pmd; if (unlikely(!pmd_trans_huge(pmd))) { pte_free(dst_mm, pgtable); goto out_unlock; } if (unlikely(pmd_trans_splitting(pmd))) { /* split huge page running from under us */ spin_unlock(&src_mm->page_table_lock); spin_unlock(&dst_mm->page_table_lock); pte_free(dst_mm, pgtable); wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ goto out; } src_page = pmd_page(pmd); VM_BUG_ON(!PageHead(src_page)); get_page(src_page); page_dup_rmap(src_page); add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); pmdp_set_wrprotect(src_mm, addr, src_pmd); pmd = pmd_mkold(pmd_wrprotect(pmd)); set_pmd_at(dst_mm, addr, dst_pmd, pmd); prepare_pmd_huge_pte(pgtable, dst_mm); ret = 0; out_unlock: spin_unlock(&src_mm->page_table_lock); spin_unlock(&dst_mm->page_table_lock); out: return ret; } /* no "address" argument so destroys page coloring of some arch */ pgtable_t get_pmd_huge_pte(struct mm_struct *mm) { pgtable_t pgtable; assert_spin_locked(&mm->page_table_lock); /* FIFO */ pgtable = mm->pmd_huge_pte; if (list_empty(&pgtable->lru)) mm->pmd_huge_pte = NULL; else { mm->pmd_huge_pte = list_entry(pgtable->lru.next, struct page, lru); list_del(&pgtable->lru); } return pgtable; } static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct 
vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) { pgtable_t pgtable; pmd_t _pmd; int ret = 0, i; struct page **pages; pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, GFP_KERNEL); if (unlikely(!pages)) { ret |= VM_FAULT_OOM; goto out; } for (i = 0; i < HPAGE_PMD_NR; i++) { pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | __GFP_OTHER_NODE, vma, address, page_to_nid(page)); if (unlikely(!pages[i] || mem_cgroup_newpage_charge(pages[i], mm, GFP_KERNEL))) { if (pages[i]) put_page(pages[i]); mem_cgroup_uncharge_start(); while (--i >= 0) { mem_cgroup_uncharge_page(pages[i]); put_page(pages[i]); } mem_cgroup_uncharge_end(); kfree(pages); ret |= VM_FAULT_OOM; goto out; } } for (i = 0; i < HPAGE_PMD_NR; i++) { copy_user_highpage(pages[i], page + i, haddr + PAGE_SHIFT*i, vma); __SetPageUptodate(pages[i]); cond_resched(); } spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(*pmd, orig_pmd))) goto out_free_pages; VM_BUG_ON(!PageHead(page)); pmdp_clear_flush_notify(vma, haddr, pmd); /* leave pmd empty until pte is filled */ pgtable = get_pmd_huge_pte(mm); pmd_populate(mm, &_pmd, pgtable); for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t *pte, entry; entry = mk_pte(pages[i], vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); page_add_new_anon_rmap(pages[i], vma, haddr); pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); pte_unmap(pte); } kfree(pages); mm->nr_ptes++; smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); page_remove_rmap(page); spin_unlock(&mm->page_table_lock); ret |= VM_FAULT_WRITE; put_page(page); out: return ret; out_free_pages: spin_unlock(&mm->page_table_lock); mem_cgroup_uncharge_start(); for (i = 0; i < HPAGE_PMD_NR; i++) { mem_cgroup_uncharge_page(pages[i]); put_page(pages[i]); } mem_cgroup_uncharge_end(); kfree(pages); goto out; } int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd) { int ret = 0; struct page *page, *new_page; unsigned long haddr; VM_BUG_ON(!vma->anon_vma); spin_lock(&mm->page_table_lock); if (unlikely(!pmd_same(*pmd, orig_pmd))) goto out_unlock; page = pmd_page(orig_pmd); VM_BUG_ON(!PageCompound(page) || !PageHead(page)); haddr = address & HPAGE_PMD_MASK; if (page_mapcount(page) == 1) { pmd_t entry; entry = pmd_mkyoung(orig_pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) update_mmu_cache(vma, address, entry); ret |= VM_FAULT_WRITE; goto out_unlock; } get_page(page); spin_unlock(&mm->page_table_lock); if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), vma, haddr, numa_node_id(), 0); else new_page = NULL; if (unlikely(!new_page)) { count_vm_event(THP_FAULT_FALLBACK); ret = do_huge_pmd_wp_page_fallback(mm, vma, address, pmd, orig_pmd, page, haddr); put_page(page); goto out; } count_vm_event(THP_FAULT_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { put_page(new_page); put_page(page); ret |= VM_FAULT_OOM; goto out; } copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); __SetPageUptodate(new_page); spin_lock(&mm->page_table_lock); put_page(page); if (unlikely(!pmd_same(*pmd, orig_pmd))) { mem_cgroup_uncharge_page(new_page); put_page(new_page); } else { pmd_t entry; VM_BUG_ON(!PageHead(page)); entry = mk_pmd(new_page, 
vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = pmd_mkhuge(entry); pmdp_clear_flush_notify(vma, haddr, pmd); page_add_new_anon_rmap(new_page, vma, haddr); set_pmd_at(mm, haddr, pmd, entry); update_mmu_cache(vma, address, entry); page_remove_rmap(page); put_page(page); ret |= VM_FAULT_WRITE; } out_unlock: spin_unlock(&mm->page_table_lock); out: return ret; } struct page *follow_trans_huge_pmd(struct mm_struct *mm, unsigned long addr, pmd_t *pmd, unsigned int flags) { struct page *page = NULL; assert_spin_locked(&mm->page_table_lock); if (flags & FOLL_WRITE && !pmd_write(*pmd)) goto out; page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); if (flags & FOLL_TOUCH) { pmd_t _pmd; /* * We should set the dirty bit only for FOLL_WRITE but * for now the dirty bit in the pmd is meaningless. * And if the dirty bit will become meaningful and * we'll only set it with FOLL_WRITE, an atomic * set_bit will be required on the pmd to set the * young bit, instead of the current set_pmd_at. */ _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd); } page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON(!PageCompound(page)); if (flags & FOLL_GET) get_page(page); out: return page; } int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd) { int ret = 0; spin_lock(&tlb->mm->page_table_lock); if (likely(pmd_trans_huge(*pmd))) { if (unlikely(pmd_trans_splitting(*pmd))) { spin_unlock(&tlb->mm->page_table_lock); wait_split_huge_page(vma->anon_vma, pmd); } else { struct page *page; pgtable_t pgtable; pgtable = get_pmd_huge_pte(tlb->mm); page = pmd_page(*pmd); pmd_clear(pmd); page_remove_rmap(page); VM_BUG_ON(page_mapcount(page) < 0); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); VM_BUG_ON(!PageHead(page)); spin_unlock(&tlb->mm->page_table_lock); tlb_remove_page(tlb, page); pte_free(tlb->mm, pgtable); ret = 1; } } else spin_unlock(&tlb->mm->page_table_lock); return ret; } int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec) { int ret = 0; spin_lock(&vma->vm_mm->page_table_lock); if (likely(pmd_trans_huge(*pmd))) { ret = !pmd_trans_splitting(*pmd); spin_unlock(&vma->vm_mm->page_table_lock); if (unlikely(!ret)) wait_split_huge_page(vma->anon_vma, pmd); else { /* * All logical pages in the range are present * if backed by a huge page. 
*/ memset(vec, 1, (end - addr) >> PAGE_SHIFT); } } else spin_unlock(&vma->vm_mm->page_table_lock); return ret; } int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot) { struct mm_struct *mm = vma->vm_mm; int ret = 0; spin_lock(&mm->page_table_lock); if (likely(pmd_trans_huge(*pmd))) { if (unlikely(pmd_trans_splitting(*pmd))) { spin_unlock(&mm->page_table_lock); wait_split_huge_page(vma->anon_vma, pmd); } else { pmd_t entry; entry = pmdp_get_and_clear(mm, addr, pmd); entry = pmd_modify(entry, newprot); set_pmd_at(mm, addr, pmd, entry); spin_unlock(&vma->vm_mm->page_table_lock); flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE); ret = 1; } } else spin_unlock(&vma->vm_mm->page_table_lock); return ret; } pmd_t *page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, enum page_check_address_pmd_flag flag) { pgd_t *pgd; pud_t *pud; pmd_t *pmd, *ret = NULL; if (address & ~HPAGE_PMD_MASK) goto out; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) goto out; if (pmd_page(*pmd) != page) goto out; /* * split_vma() may create temporary aliased mappings. There is * no risk as long as all huge pmd are found and have their * splitting bit set before __split_huge_page_refcount * runs. Finding the same huge pmd more than once during the * same rmap walk is not a problem. */ if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && pmd_trans_splitting(*pmd)) goto out; if (pmd_trans_huge(*pmd)) { VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && !pmd_trans_splitting(*pmd)); ret = pmd; } out: return ret; } static int __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd; int ret = 0; spin_lock(&mm->page_table_lock); pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); if (pmd) { /* * We can't temporarily set the pmd to null in order * to split it, the pmd must remain marked huge at all * times or the VM won't take the pmd_trans_huge paths * and it won't wait on the anon_vma->root->lock to * serialize against split_huge_page*. */ pmdp_splitting_flush_notify(vma, address, pmd); ret = 1; } spin_unlock(&mm->page_table_lock); return ret; } static void __split_huge_page_refcount(struct page *page) { int i; unsigned long head_index = page->index; struct zone *zone = page_zone(page); int zonestat; /* prevent PageLRU to go away from under us, and freeze lru stats */ spin_lock_irq(&zone->lru_lock); compound_lock(page); for (i = 1; i < HPAGE_PMD_NR; i++) { struct page *page_tail = page + i; /* tail_page->_count cannot change */ atomic_sub(atomic_read(&page_tail->_count), &page->_count); BUG_ON(page_count(page) <= 0); atomic_add(page_mapcount(page) + 1, &page_tail->_count); BUG_ON(atomic_read(&page_tail->_count) <= 0); /* after clearing PageTail the gup refcount can be released */ smp_mb(); /* * retain hwpoison flag of the poisoned tail page: * fix for the unsuitable process killed on Guest Machine(KVM) * by the memory-failure. 
*/ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; page_tail->flags |= (page->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | (1L << PG_mlocked) | (1L << PG_uptodate))); page_tail->flags |= (1L << PG_dirty); /* * 1) clear PageTail before overwriting first_page * 2) clear PageTail before clearing PageHead for VM_BUG_ON */ smp_wmb(); /* * __split_huge_page_splitting() already set the * splitting bit in all pmd that could map this * hugepage, that will ensure no CPU can alter the * mapcount on the head page. The mapcount is only * accounted in the head page and it has to be * transferred to all tail pages in the below code. So * for this code to be safe, the split the mapcount * can't change. But that doesn't mean userland can't * keep changing and reading the page contents while * we transfer the mapcount, so the pmd splitting * status is achieved setting a reserved bit in the * pmd, not by clearing the present bit. */ BUG_ON(page_mapcount(page_tail)); page_tail->_mapcount = page->_mapcount; BUG_ON(page_tail->mapping); page_tail->mapping = page->mapping; page_tail->index = ++head_index; BUG_ON(!PageAnon(page_tail)); BUG_ON(!PageUptodate(page_tail)); BUG_ON(!PageDirty(page_tail)); BUG_ON(!PageSwapBacked(page_tail)); mem_cgroup_split_huge_fixup(page, page_tail); lru_add_page_tail(zone, page, page_tail); } __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR); /* * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics, * so adjust those appropriately if this page is on the LRU. */ if (PageLRU(page)) { zonestat = NR_LRU_BASE + page_lru(page); __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1)); } ClearPageCompound(page); compound_unlock(page); spin_unlock_irq(&zone->lru_lock); for (i = 1; i < HPAGE_PMD_NR; i++) { struct page *page_tail = page + i; BUG_ON(page_count(page_tail) <= 0); /* * Tail pages may be freed if there wasn't any mapping * like if add_to_swap() is running on a lru page that * had its mapping zapped. And freeing these pages * requires taking the lru_lock so we do the put_page * of the tail pages after the split is complete. */ put_page(page_tail); } /* * Only the head page (now become a regular page) is required * to be pinned by the caller. */ BUG_ON(page_count(page) <= 0); } static int __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; pmd_t *pmd, _pmd; int ret = 0, i; pgtable_t pgtable; unsigned long haddr; spin_lock(&mm->page_table_lock); pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG); if (pmd) { pgtable = get_pmd_huge_pte(mm); pmd_populate(mm, &_pmd, pgtable); for (i = 0, haddr = address; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t *pte, entry; BUG_ON(PageCompound(page+i)); entry = mk_pte(page + i, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (!pmd_write(*pmd)) entry = pte_wrprotect(entry); else BUG_ON(page_mapcount(page) != 1); if (!pmd_young(*pmd)) entry = pte_mkold(entry); pte = pte_offset_map(&_pmd, haddr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); pte_unmap(pte); } mm->nr_ptes++; smp_wmb(); /* make pte visible before pmd */ /* * Up to this point the pmd is present and huge and * userland has the whole access to the hugepage * during the split (which happens in place). 
If we * overwrite the pmd with the not-huge version * pointing to the pte here (which of course we could * if all CPUs were bug free), userland could trigger * a small page size TLB miss on the small sized TLB * while the hugepage TLB entry is still established * in the huge TLB. Some CPU doesn't like that. See * http://support.amd.com/us/Processor_TechDocs/41322.pdf, * Erratum 383 on page 93. Intel should be safe but is * also warns that it's only safe if the permission * and cache attributes of the two entries loaded in * the two TLB is identical (which should be the case * here). But it is generally safer to never allow * small and huge TLB entries for the same virtual * address to be loaded simultaneously. So instead of * doing "pmd_populate(); flush_tlb_range();" we first * mark the current pmd notpresent (atomically because * here the pmd_trans_huge and pmd_trans_splitting * must remain set at all times on the pmd until the * split is complete for this pmd), then we flush the * SMP TLB and finally we write the non-huge version * of the pmd entry with pmd_populate. */ set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd)); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmd_populate(mm, pmd, pgtable); ret = 1; } spin_unlock(&mm->page_table_lock); return ret; } /* must be called with anon_vma->root->lock hold */ static void __split_huge_page(struct page *page, struct anon_vma *anon_vma) { int mapcount, mapcount2; struct anon_vma_chain *avc; BUG_ON(!PageHead(page)); BUG_ON(PageTail(page)); mapcount = 0; list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { struct vm_area_struct *vma = avc->vma; unsigned long addr = vma_address(page, vma); BUG_ON(is_vma_temporary_stack(vma)); if (addr == -EFAULT) continue; mapcount += __split_huge_page_splitting(page, vma, addr); } /* * It is critical that new vmas are added to the tail of the * anon_vma list. This guarantes that if copy_huge_pmd() runs * and establishes a child pmd before * __split_huge_page_splitting() freezes the parent pmd (so if * we fail to prevent copy_huge_pmd() from running until the * whole __split_huge_page() is complete), we will still see * the newly established pmd of the child later during the * walk, to be able to set it as pmd_trans_splitting too. 
*/ if (mapcount != page_mapcount(page)) printk(KERN_ERR "mapcount %d page_mapcount %d\n", mapcount, page_mapcount(page)); BUG_ON(mapcount != page_mapcount(page)); __split_huge_page_refcount(page); mapcount2 = 0; list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { struct vm_area_struct *vma = avc->vma; unsigned long addr = vma_address(page, vma); BUG_ON(is_vma_temporary_stack(vma)); if (addr == -EFAULT) continue; mapcount2 += __split_huge_page_map(page, vma, addr); } if (mapcount != mapcount2) printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n", mapcount, mapcount2, page_mapcount(page)); BUG_ON(mapcount != mapcount2); } int split_huge_page(struct page *page) { struct anon_vma *anon_vma; int ret = 1; BUG_ON(!PageAnon(page)); anon_vma = page_lock_anon_vma(page); if (!anon_vma) goto out; ret = 0; if (!PageCompound(page)) goto out_unlock; BUG_ON(!PageSwapBacked(page)); __split_huge_page(page, anon_vma); count_vm_event(THP_SPLIT); BUG_ON(PageCompound(page)); out_unlock: page_unlock_anon_vma(anon_vma); out: return ret; } #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \ VM_HUGETLB|VM_SHARED|VM_MAYSHARE) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { switch (advice) { case MADV_HUGEPAGE: /* * Be somewhat over-protective like KSM for now! */ if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; /* * If the vma become good for khugepaged to scan, * register it here without waiting a page fault that * may not happen any time soon. */ if (unlikely(khugepaged_enter_vma_merge(vma))) return -ENOMEM; break; case MADV_NOHUGEPAGE: /* * Be somewhat over-protective like KSM for now! */ if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; /* * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning * this vma even if we leave the mm registered in khugepaged if * it got registered before VM_NOHUGEPAGE was set. 
*/ break; } return 0; } static int __init khugepaged_slab_init(void) { mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", sizeof(struct mm_slot), __alignof__(struct mm_slot), 0, NULL); if (!mm_slot_cache) return -ENOMEM; return 0; } static void __init khugepaged_slab_free(void) { kmem_cache_destroy(mm_slot_cache); mm_slot_cache = NULL; } static inline struct mm_slot *alloc_mm_slot(void) { if (!mm_slot_cache) /* initialization failed */ return NULL; return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); } static inline void free_mm_slot(struct mm_slot *mm_slot) { kmem_cache_free(mm_slot_cache, mm_slot); } static int __init mm_slots_hash_init(void) { mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head), GFP_KERNEL); if (!mm_slots_hash) return -ENOMEM; return 0; } #if 0 static void __init mm_slots_hash_free(void) { kfree(mm_slots_hash); mm_slots_hash = NULL; } #endif static struct mm_slot *get_mm_slot(struct mm_struct *mm) { struct mm_slot *mm_slot; struct hlist_head *bucket; struct hlist_node *node; bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS]; hlist_for_each_entry(mm_slot, node, bucket, hash) { if (mm == mm_slot->mm) return mm_slot; } return NULL; } static void insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) { struct hlist_head *bucket; bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct)) % MM_SLOTS_HASH_HEADS]; mm_slot->mm = mm; hlist_add_head(&mm_slot->hash, bucket); } static inline int khugepaged_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } int __khugepaged_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int wakeup; mm_slot = alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ VM_BUG_ON(khugepaged_test_exit(mm)); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; } spin_lock(&khugepaged_mm_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little. */ wakeup = list_empty(&khugepaged_scan.mm_head); list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); spin_unlock(&khugepaged_mm_lock); atomic_inc(&mm->mm_count); if (wakeup) wake_up_interruptible(&khugepaged_wait); return 0; } int khugepaged_enter_vma_merge(struct vm_area_struct *vma) { unsigned long hstart, hend; if (!vma->anon_vma) /* * Not yet faulted in so we will register later in the * page fault if needed. */ return 0; if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() must be * true too, verify it here. */ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) return khugepaged_enter(vma); return 0; } void __khugepaged_exit(struct mm_struct *mm) { struct mm_slot *mm_slot; int free = 0; spin_lock(&khugepaged_mm_lock); mm_slot = get_mm_slot(mm); if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { hlist_del(&mm_slot->hash); list_del(&mm_slot->mm_node); free = 1; } if (free) { spin_unlock(&khugepaged_mm_lock); clear_bit(MMF_VM_HUGEPAGE, &mm->flags); free_mm_slot(mm_slot); mmdrop(mm); } else if (mm_slot) { spin_unlock(&khugepaged_mm_lock); /* * This is required to serialize against * khugepaged_test_exit() (which is guaranteed to run * under mmap sem read mode). 
Stop here (after we * return all pagetables will be destroyed) until * khugepaged has finished working on the pagetables * under the mmap_sem. */ down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } else spin_unlock(&khugepaged_mm_lock); } static void release_pte_page(struct page *page) { /* 0 stands for page_is_file_cache(page) == false */ dec_zone_page_state(page, NR_ISOLATED_ANON + 0); unlock_page(page); putback_lru_page(page); } static void release_pte_pages(pte_t *pte, pte_t *_pte) { while (--_pte >= pte) { pte_t pteval = *_pte; if (!pte_none(pteval)) release_pte_page(pte_page(pteval)); } } static void release_all_pte_pages(pte_t *pte) { release_pte_pages(pte, pte + HPAGE_PMD_NR); } static int __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte) { struct page *page; pte_t *_pte; int referenced = 0, isolated = 0, none = 0; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = *_pte; if (pte_none(pteval)) { if (++none <= khugepaged_max_ptes_none) continue; else { release_pte_pages(pte, _pte); goto out; } } if (!pte_present(pteval) || !pte_write(pteval)) { release_pte_pages(pte, _pte); goto out; } page = vm_normal_page(vma, address, pteval); if (unlikely(!page)) { release_pte_pages(pte, _pte); goto out; } VM_BUG_ON(PageCompound(page)); BUG_ON(!PageAnon(page)); VM_BUG_ON(!PageSwapBacked(page)); /* cannot use mapcount: can't collapse if there's a gup pin */ if (page_count(page) != 1) { release_pte_pages(pte, _pte); goto out; } /* * We can do it before isolate_lru_page because the * page can't be freed from under us. NOTE: PG_lock * is needed to serialize against split_huge_page * when invoked from the VM. */ if (!trylock_page(page)) { release_pte_pages(pte, _pte); goto out; } /* * Isolate the page to avoid collapsing an hugepage * currently in use by the VM. */ if (isolate_lru_page(page)) { unlock_page(page); release_pte_pages(pte, _pte); goto out; } /* 0 stands for page_is_file_cache(page) == false */ inc_zone_page_state(page, NR_ISOLATED_ANON + 0); VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageLRU(page)); /* If there is no mapped pte young don't collapse the page */ if (pte_young(pteval) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = 1; } if (unlikely(!referenced)) release_all_pte_pages(pte); else isolated = 1; out: return isolated; } static void __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl) { pte_t *_pte; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { pte_t pteval = *_pte; struct page *src_page; if (pte_none(pteval)) { clear_user_highpage(page, address); add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); } else { src_page = pte_page(pteval); copy_user_highpage(page, src_page, address, vma); VM_BUG_ON(page_mapcount(src_page) != 1); VM_BUG_ON(page_count(src_page) != 2); release_pte_page(src_page); /* * ptl mostly unnecessary, but preempt has to * be disabled to update the per-cpu stats * inside page_remove_rmap(). */ spin_lock(ptl); /* * paravirt calls inside pte_clear here are * superfluous. 
*/ pte_clear(vma->vm_mm, address, _pte); page_remove_rmap(src_page); spin_unlock(ptl); free_page_and_swap_cache(src_page); } address += PAGE_SIZE; page++; } } static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) { pgd_t *pgd; pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; pgtable_t pgtable; struct page *new_page; spinlock_t *ptl; int isolated; unsigned long hstart, hend; VM_BUG_ON(address & ~HPAGE_PMD_MASK); #ifndef CONFIG_NUMA VM_BUG_ON(!*hpage); new_page = *hpage; if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { up_read(&mm->mmap_sem); return; } #else VM_BUG_ON(*hpage); /* * Allocate the page while the vma is still valid and under * the mmap_sem read mode so there is no memory allocation * later when we take the mmap_sem in write mode. This is more * friendly behavior (OTOH it may actually hide bugs) to * filesystems in userland with daemons allocating memory in * the userland I/O paths. Allocating memory with the * mmap_sem in read mode is good idea also to allow greater * scalability. */ new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, node, __GFP_OTHER_NODE); if (unlikely(!new_page)) { up_read(&mm->mmap_sem); count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); return; } count_vm_event(THP_COLLAPSE_ALLOC); if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { up_read(&mm->mmap_sem); put_page(new_page); return; } #endif /* after allocating the hugepage upgrade to mmap_sem write mode */ up_read(&mm->mmap_sem); /* * Prevent all access to pagetables with the exception of * gup_fast later hanlded by the ptep_clear_flush and the VM * handled by the anon_vma lock + PG_lock. */ down_write(&mm->mmap_sem); if (unlikely(khugepaged_test_exit(mm))) goto out; vma = find_vma(mm, address); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (address < hstart || address + HPAGE_PMD_SIZE > hend) goto out; if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) goto out; if (!vma->anon_vma || vma->vm_ops) goto out; if (is_vma_temporary_stack(vma)) goto out; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() must be * true too, verify it here. */ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); /* pmd can't go away or become huge under us */ if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) goto out; anon_vma_lock(vma->anon_vma); pte = pte_offset_map(pmd, address); ptl = pte_lockptr(mm, pmd); spin_lock(&mm->page_table_lock); /* probably unnecessary */ /* * After this gup_fast can't run anymore. This also removes * any huge TLB entry from the CPU so we won't allow * huge and small TLB entries for the same virtual address * to avoid the risk of CPU bugs in that area. */ _pmd = pmdp_clear_flush_notify(vma, address, pmd); spin_unlock(&mm->page_table_lock); spin_lock(ptl); isolated = __collapse_huge_page_isolate(vma, address, pte); spin_unlock(ptl); if (unlikely(!isolated)) { pte_unmap(pte); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); set_pmd_at(mm, address, pmd, _pmd); spin_unlock(&mm->page_table_lock); anon_vma_unlock(vma->anon_vma); goto out; } /* * All pages are isolated and locked so anon_vma rmap * can't run anymore. 
*/ anon_vma_unlock(vma->anon_vma); __collapse_huge_page_copy(pte, new_page, vma, address, ptl); pte_unmap(pte); __SetPageUptodate(new_page); pgtable = pmd_pgtable(_pmd); VM_BUG_ON(page_count(pgtable) != 1); VM_BUG_ON(page_mapcount(pgtable) != 0); _pmd = mk_pmd(new_page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); _pmd = pmd_mkhuge(_pmd); /* * spin_lock() below is not the equivalent of smp_wmb(), so * this is needed to avoid the copy_huge_page writes to become * visible after the set_pmd_at() write. */ smp_wmb(); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache(vma, address, entry); prepare_pmd_huge_pte(pgtable, mm); mm->nr_ptes--; spin_unlock(&mm->page_table_lock); #ifndef CONFIG_NUMA *hpage = NULL; #endif khugepaged_pages_collapsed++; out_up_write: up_write(&mm->mmap_sem); return; out: mem_cgroup_uncharge_page(new_page); #ifdef CONFIG_NUMA put_page(new_page); #endif goto out_up_write; } static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte, *_pte; int ret = 0, referenced = 0, none = 0; struct page *page; unsigned long _address; spinlock_t *ptl; int node = -1; VM_BUG_ON(address & ~HPAGE_PMD_MASK); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, address, &ptl); for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; if (pte_none(pteval)) { if (++none <= khugepaged_max_ptes_none) continue; else goto out_unmap; } if (!pte_present(pteval) || !pte_write(pteval)) goto out_unmap; page = vm_normal_page(vma, _address, pteval); if (unlikely(!page)) goto out_unmap; /* * Chose the node of the first page. This could * be more sophisticated and look at more pages, * but isn't for now. */ if (node == -1) node = page_to_nid(page); VM_BUG_ON(PageCompound(page)); if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) goto out_unmap; /* cannot use mapcount: can't collapse if there's a gup pin */ if (page_count(page) != 1) goto out_unmap; if (pte_young(pteval) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = 1; } if (referenced) ret = 1; out_unmap: pte_unmap_unlock(pte, ptl); if (ret) /* collapse_huge_page will return with the mmap_sem released */ collapse_huge_page(mm, address, hpage, vma, node); out: return ret; } static void collect_mm_slot(struct mm_slot *mm_slot) { struct mm_struct *mm = mm_slot->mm; VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); if (khugepaged_test_exit(mm)) { /* free mm_slot */ hlist_del(&mm_slot->hash); list_del(&mm_slot->mm_node); /* * Not strictly needed because the mm exited already. 
* * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); */ /* khugepaged_mm_lock actually not necessary for the below */ free_mm_slot(mm_slot); mmdrop(mm); } } static unsigned int khugepaged_scan_mm_slot(unsigned int pages, struct page **hpage) { struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; VM_BUG_ON(!pages); VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock)); if (khugepaged_scan.mm_slot) mm_slot = khugepaged_scan.mm_slot; else { mm_slot = list_entry(khugepaged_scan.mm_head.next, struct mm_slot, mm_node); khugepaged_scan.address = 0; khugepaged_scan.mm_slot = mm_slot; } spin_unlock(&khugepaged_mm_lock); mm = mm_slot->mm; down_read(&mm->mmap_sem); if (unlikely(khugepaged_test_exit(mm))) vma = NULL; else vma = find_vma(mm, khugepaged_scan.address); progress++; for (; vma; vma = vma->vm_next) { unsigned long hstart, hend; cond_resched(); if (unlikely(khugepaged_test_exit(mm))) { progress++; break; } if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) { skip: progress++; continue; } if (!vma->anon_vma || vma->vm_ops) goto skip; if (is_vma_temporary_stack(vma)) goto skip; /* * If is_pfn_mapping() is true is_learn_pfn_mapping() * must be true too, verify it here. */ VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart >= hend) goto skip; if (khugepaged_scan.address > hend) goto skip; if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { int ret; cond_resched(); if (unlikely(khugepaged_test_exit(mm))) goto breakouterloop; VM_BUG_ON(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); ret = khugepaged_scan_pmd(mm, vma, khugepaged_scan.address, hpage); /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; if (ret) /* we released mmap_sem so break loop */ goto breakouterloop_mmap_sem; if (progress >= pages) goto breakouterloop; } } breakouterloop: up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ breakouterloop_mmap_sem: spin_lock(&khugepaged_mm_lock); VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); /* * Release the current mm_slot if this mm is about to die, or * if we scanned all vmas of this mm. */ if (khugepaged_test_exit(mm) || !vma) { /* * Make sure that if mm_users is reaching zero while * khugepaged runs here, khugepaged_exit will find * mm_slot not pointing to the exiting mm. 
*/ if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { khugepaged_scan.mm_slot = list_entry( mm_slot->mm_node.next, struct mm_slot, mm_node); khugepaged_scan.address = 0; } else { khugepaged_scan.mm_slot = NULL; khugepaged_full_scans++; } collect_mm_slot(mm_slot); } return progress; } static int khugepaged_has_work(void) { return !list_empty(&khugepaged_scan.mm_head) && khugepaged_enabled(); } static int khugepaged_wait_event(void) { return !list_empty(&khugepaged_scan.mm_head) || !khugepaged_enabled(); } static void khugepaged_do_scan(struct page **hpage) { unsigned int progress = 0, pass_through_head = 0; unsigned int pages = khugepaged_pages_to_scan; barrier(); /* write khugepaged_pages_to_scan to local stack */ while (progress < pages) { cond_resched(); #ifndef CONFIG_NUMA if (!*hpage) { *hpage = alloc_hugepage(khugepaged_defrag()); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); break; } count_vm_event(THP_COLLAPSE_ALLOC); } #else if (IS_ERR(*hpage)) break; #endif if (unlikely(kthread_should_stop() || freezing(current))) break; spin_lock(&khugepaged_mm_lock); if (!khugepaged_scan.mm_slot) pass_through_head++; if (khugepaged_has_work() && pass_through_head < 2) progress += khugepaged_scan_mm_slot(pages - progress, hpage); else progress = pages; spin_unlock(&khugepaged_mm_lock); } } static void khugepaged_alloc_sleep(void) { DEFINE_WAIT(wait); add_wait_queue(&khugepaged_wait, &wait); schedule_timeout_interruptible( msecs_to_jiffies( khugepaged_alloc_sleep_millisecs)); remove_wait_queue(&khugepaged_wait, &wait); } #ifndef CONFIG_NUMA static struct page *khugepaged_alloc_hugepage(void) { struct page *hpage; do { hpage = alloc_hugepage(khugepaged_defrag()); if (!hpage) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); khugepaged_alloc_sleep(); } else count_vm_event(THP_COLLAPSE_ALLOC); } while (unlikely(!hpage) && likely(khugepaged_enabled())); return hpage; } #endif static void khugepaged_loop(void) { struct page *hpage; #ifdef CONFIG_NUMA hpage = NULL; #endif while (likely(khugepaged_enabled())) { #ifndef CONFIG_NUMA hpage = khugepaged_alloc_hugepage(); if (unlikely(!hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); break; } count_vm_event(THP_COLLAPSE_ALLOC); #else if (IS_ERR(hpage)) { khugepaged_alloc_sleep(); hpage = NULL; } #endif khugepaged_do_scan(&hpage); #ifndef CONFIG_NUMA if (hpage) put_page(hpage); #endif try_to_freeze(); if (unlikely(kthread_should_stop())) break; if (khugepaged_has_work()) { DEFINE_WAIT(wait); if (!khugepaged_scan_sleep_millisecs) continue; add_wait_queue(&khugepaged_wait, &wait); schedule_timeout_interruptible( msecs_to_jiffies( khugepaged_scan_sleep_millisecs)); remove_wait_queue(&khugepaged_wait, &wait); } else if (khugepaged_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); } } static int khugepaged(void *none) { struct mm_slot *mm_slot; set_freezable(); set_user_nice(current, 19); /* serialize with start_khugepaged() */ mutex_lock(&khugepaged_mutex); for (;;) { mutex_unlock(&khugepaged_mutex); VM_BUG_ON(khugepaged_thread != current); khugepaged_loop(); VM_BUG_ON(khugepaged_thread != current); mutex_lock(&khugepaged_mutex); if (!khugepaged_enabled()) break; if (unlikely(kthread_should_stop())) break; } spin_lock(&khugepaged_mm_lock); mm_slot = khugepaged_scan.mm_slot; khugepaged_scan.mm_slot = NULL; if (mm_slot) collect_mm_slot(mm_slot); spin_unlock(&khugepaged_mm_lock); khugepaged_thread = NULL; mutex_unlock(&khugepaged_mutex); return 0; } void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd) { 
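	/*
	 * Demote the huge pmd at *pmd back to regular ptes: take a reference
	 * on the compound page it maps and run split_huge_page() on it.
	 * Callers typically reach this through the split_huge_page_pmd()
	 * wrapper (e.g. split_huge_page_address() below) when they cannot
	 * handle a huge pmd.
	 */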
struct page *page; spin_lock(&mm->page_table_lock); if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(&mm->page_table_lock); return; } page = pmd_page(*pmd); VM_BUG_ON(!page_count(page)); get_page(page); spin_unlock(&mm->page_table_lock); split_huge_page(page); put_page(page); BUG_ON(pmd_trans_huge(*pmd)); } static void split_huge_page_address(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) return; pud = pud_offset(pgd, address); if (!pud_present(*pud)) return; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return; /* * Caller holds the mmap_sem write mode, so a huge pmd cannot * materialize from under us. */ split_huge_page_pmd(mm, pmd); } void __vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { /* * If the new start address isn't hpage aligned and it could * previously contain an hugepage: check if we need to split * an huge pmd. */ if (start & ~HPAGE_PMD_MASK && (start & HPAGE_PMD_MASK) >= vma->vm_start && (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) split_huge_page_address(vma->vm_mm, start); /* * If the new end address isn't hpage aligned and it could * previously contain an hugepage: check if we need to split * an huge pmd. */ if (end & ~HPAGE_PMD_MASK && (end & HPAGE_PMD_MASK) >= vma->vm_start && (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) split_huge_page_address(vma->vm_mm, end); /* * If we're also updating the vma->vm_next->vm_start, if the new * vm_next->vm_start isn't page aligned and it could previously * contain an hugepage: check if we need to split an huge pmd. */ if (adjust_next > 0) { struct vm_area_struct *next = vma->vm_next; unsigned long nstart = next->vm_start; nstart += adjust_next << PAGE_SHIFT; if (nstart & ~HPAGE_PMD_MASK && (nstart & HPAGE_PMD_MASK) >= next->vm_start && (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) split_huge_page_address(next->vm_mm, nstart); } }
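/*
 * Userland-facing sketch (not part of this file): hugepage_madvise() above is
 * what MADV_HUGEPAGE lands in, and do_huge_pmd_anonymous_page() is what backs
 * the first write to an aligned anonymous range with a huge pmd.  A minimal
 * example follows, assuming an x86-64 build with CONFIG_TRANSPARENT_HUGEPAGE
 * and a glibc that exposes MADV_HUGEPAGE; the 2MB constant stands in for
 * HPAGE_PMD_SIZE.
 */
#if 0	/* userland example only -- never built as part of the kernel */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16UL << 20;		/* 16MB, a multiple of the 2MB pmd size */
	void *p;

	if (posix_memalign(&p, 2UL << 20, len))	/* align to the pmd size */
		return 1;
	madvise(p, len, MADV_HUGEPAGE);		/* sets VM_HUGEPAGE via hugepage_madvise() */
	memset(p, 1, len);			/* write faults can now be served by
						 * do_huge_pmd_anonymous_page(), and
						 * khugepaged may collapse the rest */
	return 0;
}
#endif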
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file README.legal in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <linux/elf.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/seccomp.h> #include <linux/audit.h> #include <trace/syscall.h> #ifdef CONFIG_PPC32 #include <linux/module.h> #endif #include <linux/hw_breakpoint.h> #include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> /* * The parameter save area on the stack is used to store arguments being passed * to callee function and is located at fixed offset from stack pointer. */ #ifdef CONFIG_PPC32 #define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */ #else /* CONFIG_PPC32 */ #define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */ #endif struct pt_regs_offset { const char *name; int offset; }; #define STR(s) #s /* convert to string */ #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define GPR_OFFSET_NAME(num) \ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { GPR_OFFSET_NAME(0), GPR_OFFSET_NAME(1), GPR_OFFSET_NAME(2), GPR_OFFSET_NAME(3), GPR_OFFSET_NAME(4), GPR_OFFSET_NAME(5), GPR_OFFSET_NAME(6), GPR_OFFSET_NAME(7), GPR_OFFSET_NAME(8), GPR_OFFSET_NAME(9), GPR_OFFSET_NAME(10), GPR_OFFSET_NAME(11), GPR_OFFSET_NAME(12), GPR_OFFSET_NAME(13), GPR_OFFSET_NAME(14), GPR_OFFSET_NAME(15), GPR_OFFSET_NAME(16), GPR_OFFSET_NAME(17), GPR_OFFSET_NAME(18), GPR_OFFSET_NAME(19), GPR_OFFSET_NAME(20), GPR_OFFSET_NAME(21), GPR_OFFSET_NAME(22), GPR_OFFSET_NAME(23), GPR_OFFSET_NAME(24), GPR_OFFSET_NAME(25), GPR_OFFSET_NAME(26), GPR_OFFSET_NAME(27), GPR_OFFSET_NAME(28), GPR_OFFSET_NAME(29), GPR_OFFSET_NAME(30), GPR_OFFSET_NAME(31), REG_OFFSET_NAME(nip), REG_OFFSET_NAME(msr), REG_OFFSET_NAME(ctr), REG_OFFSET_NAME(link), REG_OFFSET_NAME(xer), REG_OFFSET_NAME(ccr), #ifdef CONFIG_PPC64 REG_OFFSET_NAME(softe), #else REG_OFFSET_NAME(mq), #endif REG_OFFSET_NAME(trap), REG_OFFSET_NAME(dar), REG_OFFSET_NAME(dsisr), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. 
* * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Set of msr bits that gdb can change on behalf of a process. */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_DEBUGCHANGE 0 #else #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) #endif /* * Max register writeable via put_reg */ #ifdef CONFIG_PPC32 #define PT_MAX_PUT_REG PT_MQ #else #define PT_MAX_PUT_REG PT_CCR #endif static unsigned long get_user_msr(struct task_struct *task) { return task->thread.regs->msr | task->thread.fpexc_mode; } static int set_user_msr(struct task_struct *task, unsigned long msr) { task->thread.regs->msr &= ~MSR_DEBUGCHANGE; task->thread.regs->msr |= msr & MSR_DEBUGCHANGE; return 0; } /* * We prevent mucking around with the reserved area of trap * which are used internally by the kernel. */ static int set_user_trap(struct task_struct *task, unsigned long trap) { task->thread.regs->trap = trap & 0xfff0; return 0; } /* * Get contents of register REGNO in task TASK. */ unsigned long ptrace_get_reg(struct task_struct *task, int regno) { if (task->thread.regs == NULL) return -EIO; if (regno == PT_MSR) return get_user_msr(task); if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) return ((unsigned long *)task->thread.regs)[regno]; return -EIO; } /* * Write contents of register REGNO in task TASK. */ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data) { if (task->thread.regs == NULL) return -EIO; if (regno == PT_MSR) return set_user_msr(task, data); if (regno == PT_TRAP) return set_user_trap(task, data); if (regno <= PT_MAX_PUT_REG) { ((unsigned long *)task->thread.regs)[regno] = data; return 0; } return -EIO; } static int gpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int i, ret; if (target->thread.regs == NULL) return -EIO; if (!FULL_REGS(target->thread.regs)) { /* We have a partial register set. 
Fill 14-31 with bogus values */ for (i = 14; i < 32; i++) target->thread.regs->gpr[i] = NV_REG_POISON; } ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, target->thread.regs, 0, offsetof(struct pt_regs, msr)); if (!ret) { unsigned long msr = get_user_msr(target); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr, offsetof(struct pt_regs, msr), offsetof(struct pt_regs, msr) + sizeof(msr)); } BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != offsetof(struct pt_regs, msr) + sizeof(long)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.regs->orig_gpr3, offsetof(struct pt_regs, orig_gpr3), sizeof(struct pt_regs)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, sizeof(struct pt_regs), -1); return ret; } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long reg; int ret; if (target->thread.regs == NULL) return -EIO; CHECK_FULL_REGS(target->thread.regs); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, target->thread.regs, 0, PT_MSR * sizeof(reg)); if (!ret && count > 0) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg, PT_MSR * sizeof(reg), (PT_MSR + 1) * sizeof(reg)); if (!ret) ret = set_user_msr(target, reg); } BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != offsetof(struct pt_regs, msr) + sizeof(long)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.regs->orig_gpr3, PT_ORIG_R3 * sizeof(reg), (PT_MAX_PUT_REG + 1) * sizeof(reg)); if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret) ret = user_regset_copyin_ignore( &pos, &count, &kbuf, &ubuf, (PT_MAX_PUT_REG + 1) * sizeof(reg), PT_TRAP * sizeof(reg)); if (!ret && count > 0) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg, PT_TRAP * sizeof(reg), (PT_TRAP + 1) * sizeof(reg)); if (!ret) ret = set_user_trap(target, reg); } if (!ret) ret = user_regset_copyin_ignore( &pos, &count, &kbuf, &ubuf, (PT_TRAP + 1) * sizeof(reg), -1); return ret; } static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { #ifdef CONFIG_VSX double buf[33]; int i; #endif flush_fp_to_thread(target); #ifdef CONFIG_VSX /* copy to local buffer then write that out */ for (i = 0; i < 32 ; i++) buf[i] = target->thread.TS_FPR(i); memcpy(&buf[32], &target->thread.fpscr, sizeof(double)); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); #else BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != offsetof(struct thread_struct, TS_FPR(32))); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpr, 0, -1); #endif } static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { #ifdef CONFIG_VSX double buf[33]; int i; #endif flush_fp_to_thread(target); #ifdef CONFIG_VSX /* copy to local buffer then write that out */ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); if (i) return i; for (i = 0; i < 32 ; i++) target->thread.TS_FPR(i) = buf[i]; memcpy(&target->thread.fpscr, &buf[32], sizeof(double)); return 0; #else BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != offsetof(struct thread_struct, TS_FPR(32))); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpr, 0, -1); #endif } #ifdef CONFIG_ALTIVEC /* * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. 
* The transfer totals 34 quadword. Quadwords 0-31 contain the * corresponding vector registers. Quadword 32 contains the vscr as the * last word (offset 12) within that quadword. Quadword 33 contains the * vrsave as the first word (offset 0) within the quadword. * * This definition of the VMX state is compatible with the current PPC32 * ptrace interface. This allows signal handling and ptrace to use the * same structures. This also simplifies the implementation of a bi-arch * (combined (32- and 64-bit) gdb. */ static int vr_active(struct task_struct *target, const struct user_regset *regset) { flush_altivec_to_thread(target); return target->thread.used_vr ? regset->n : 0; } static int vr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; flush_altivec_to_thread(target); BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != offsetof(struct thread_struct, vr[32])); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.vr, 0, 33 * sizeof(vector128)); if (!ret) { /* * Copy out only the low-order word of vrsave. */ union { elf_vrreg_t reg; u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); vrsave.word = target->thread.vrsave; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); } return ret; } static int vr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; flush_altivec_to_thread(target); BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != offsetof(struct thread_struct, vr[32])); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.vr, 0, 33 * sizeof(vector128)); if (!ret && count > 0) { /* * We use only the first word of vrsave. */ union { elf_vrreg_t reg; u32 word; } vrsave; memset(&vrsave, 0, sizeof(vrsave)); vrsave.word = target->thread.vrsave; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, 33 * sizeof(vector128), -1); if (!ret) target->thread.vrsave = vrsave.word; } return ret; } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX /* * Currently to set and and get all the vsx state, you need to call * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. */ static int vsr_active(struct task_struct *target, const struct user_regset *regset) { flush_vsx_to_thread(target); return target->thread.used_vsr ? 
regset->n : 0; } static int vsr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { double buf[32]; int ret, i; flush_vsx_to_thread(target); for (i = 0; i < 32 ; i++) buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET]; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); return ret; } static int vsr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { double buf[32]; int ret,i; flush_vsx_to_thread(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); for (i = 0; i < 32 ; i++) target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return ret; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* * For get_evrregs/set_evrregs functions 'data' has the following layout: * * struct { * u32 evr[32]; * u64 acc; * u32 spefscr; * } */ static int evr_active(struct task_struct *target, const struct user_regset *regset) { flush_spe_to_thread(target); return target->thread.used_spe ? regset->n : 0; } static int evr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { int ret; flush_spe_to_thread(target); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.evr, 0, sizeof(target->thread.evr)); BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) != offsetof(struct thread_struct, spefscr)); if (!ret) ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.acc, sizeof(target->thread.evr), -1); return ret; } static int evr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; flush_spe_to_thread(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.evr, 0, sizeof(target->thread.evr)); BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) != offsetof(struct thread_struct, spefscr)); if (!ret) ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.acc, sizeof(target->thread.evr), -1); return ret; } #endif /* CONFIG_SPE */ /* * These are our native regset flavors. 
*/ enum powerpc_regset { REGSET_GPR, REGSET_FPR, #ifdef CONFIG_ALTIVEC REGSET_VMX, #endif #ifdef CONFIG_VSX REGSET_VSX, #endif #ifdef CONFIG_SPE REGSET_SPE, #endif }; static const struct user_regset native_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .get = fpr_get, .set = fpr_set }, #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, .size = sizeof(vector128), .align = sizeof(vector128), .active = vr_active, .get = vr_get, .set = vr_set }, #endif #ifdef CONFIG_VSX [REGSET_VSX] = { .core_note_type = NT_PPC_VSX, .n = 32, .size = sizeof(double), .align = sizeof(double), .active = vsr_active, .get = vsr_get, .set = vsr_set }, #endif #ifdef CONFIG_SPE [REGSET_SPE] = { .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, #endif }; static const struct user_regset_view user_ppc_native_view = { .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) }; #ifdef CONFIG_PPC64 #include <linux/compat.h> static int gpr32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const unsigned long *regs = &target->thread.regs->gpr[0]; compat_ulong_t *k = kbuf; compat_ulong_t __user *u = ubuf; compat_ulong_t reg; int i; if (target->thread.regs == NULL) return -EIO; if (!FULL_REGS(target->thread.regs)) { /* We have a partial register set. Fill 14-31 with bogus values */ for (i = 14; i < 32; i++) target->thread.regs->gpr[i] = NV_REG_POISON; } pos /= sizeof(reg); count /= sizeof(reg); if (kbuf) for (; count > 0 && pos < PT_MSR; --count) *k++ = regs[pos++]; else for (; count > 0 && pos < PT_MSR; --count) if (__put_user((compat_ulong_t) regs[pos++], u++)) return -EFAULT; if (count > 0 && pos == PT_MSR) { reg = get_user_msr(target); if (kbuf) *k++ = reg; else if (__put_user(reg, u++)) return -EFAULT; ++pos; --count; } if (kbuf) for (; count > 0 && pos < PT_REGS_COUNT; --count) *k++ = regs[pos++]; else for (; count > 0 && pos < PT_REGS_COUNT; --count) if (__put_user((compat_ulong_t) regs[pos++], u++)) return -EFAULT; kbuf = k; ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, PT_REGS_COUNT * sizeof(reg), -1); } static int gpr32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { unsigned long *regs = &target->thread.regs->gpr[0]; const compat_ulong_t *k = kbuf; const compat_ulong_t __user *u = ubuf; compat_ulong_t reg; if (target->thread.regs == NULL) return -EIO; CHECK_FULL_REGS(target->thread.regs); pos /= sizeof(reg); count /= sizeof(reg); if (kbuf) for (; count > 0 && pos < PT_MSR; --count) regs[pos++] = *k++; else for (; count > 0 && pos < PT_MSR; --count) { if (__get_user(reg, u++)) return -EFAULT; regs[pos++] = reg; } if (count > 0 && pos == PT_MSR) { if (kbuf) reg = *k++; else if (__get_user(reg, u++)) return -EFAULT; set_user_msr(target, reg); ++pos; --count; } if (kbuf) { for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) regs[pos++] = *k++; for (; count > 0 && pos < PT_TRAP; --count, ++pos) ++k; } else { for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) { if (__get_user(reg, u++)) return -EFAULT; 
regs[pos++] = reg; } for (; count > 0 && pos < PT_TRAP; --count, ++pos) if (__get_user(reg, u++)) return -EFAULT; } if (count > 0 && pos == PT_TRAP) { if (kbuf) reg = *k++; else if (__get_user(reg, u++)) return -EFAULT; set_user_trap(target, reg); ++pos; --count; } kbuf = k; ubuf = u; pos *= sizeof(reg); count *= sizeof(reg); return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, (PT_TRAP + 1) * sizeof(reg), -1); } /* * These are the regset flavors matching the CONFIG_PPC32 native set. */ static const struct user_regset compat_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), .get = gpr32_get, .set = gpr32_set }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(double), .align = sizeof(double), .get = fpr_get, .set = fpr_set }, #ifdef CONFIG_ALTIVEC [REGSET_VMX] = { .core_note_type = NT_PPC_VMX, .n = 34, .size = sizeof(vector128), .align = sizeof(vector128), .active = vr_active, .get = vr_get, .set = vr_set }, #endif #ifdef CONFIG_SPE [REGSET_SPE] = { .core_note_type = NT_PPC_SPE, .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, #endif }; static const struct user_regset_view user_ppc_compat_view = { .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI, .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets) }; #endif /* CONFIG_PPC64 */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_PPC64 if (test_tsk_thread_flag(task, TIF_32BIT)) return &user_ppc_compat_view; #endif return &user_ppc_native_view; } void user_enable_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS task->thread.dbcr0 &= ~DBCR0_BT; task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_BE; regs->msr |= MSR_SE; #endif } set_tsk_thread_flag(task, TIF_SINGLESTEP); } void user_enable_block_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS task->thread.dbcr0 &= ~DBCR0_IC; task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_SE; regs->msr |= MSR_BE; #endif } set_tsk_thread_flag(task, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * The logic to disable single stepping should be as * simple as turning off the Instruction Complete flag. * And, after doing so, if all debug flags are off, turn * off DBCR0(IDM) and MSR(DE) .... Torez */ task->thread.dbcr0 &= ~DBCR0_IC; /* * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. */ if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, task->thread.dbcr1)) { /* * All debug events were off..... */ task->thread.dbcr0 &= ~DBCR0_IDM; regs->msr &= ~MSR_DE; } #else regs->msr &= ~(MSR_SE | MSR_BE); #endif } clear_tsk_thread_flag(task, TIF_SINGLESTEP); } #ifdef CONFIG_HAVE_HW_BREAKPOINT void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; /* * Disable the breakpoint request here since ptrace has defined a * one-shot behaviour for breakpoint exceptions in PPC64. * The SIGTRAP signal is generated automatically for us in do_dabr(). 
* We don't have to do anything about that here */ attr = bp->attr; attr.disabled = true; modify_user_hw_breakpoint(bp, &attr); } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data) { #ifdef CONFIG_HAVE_HW_BREAKPOINT int ret; struct thread_struct *thread = &(task->thread); struct perf_event *bp; struct perf_event_attr attr; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* For ppc64 we support one DABR and no IABR's at the moment (ppc64). * For embedded processors we support one DAC and no IAC's at the * moment. */ if (addr > 0) return -EINVAL; /* The bottom 3 bits in dabr are flags */ if ((data & ~0x7UL) >= TASK_SIZE) return -EIO; #ifndef CONFIG_PPC_ADV_DEBUG_REGS /* For processors using DABR (i.e. 970), the bottom 3 bits are flags. * It was assumed, on previous implementations, that 3 bits were * passed together with the data address, fitting the design of the * DABR register, as follows: * * bit 0: Read flag * bit 1: Write flag * bit 2: Breakpoint translation * * Thus, we use them here as so. */ /* Ensure breakpoint translation bit is set */ if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT if (ptrace_get_breakpoints(task) < 0) return -ESRCH; bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } ptrace_put_breakpoints(task); return 0; } if (bp) { attr = bp->attr; attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); if (ret) { ptrace_put_breakpoints(task); return ret; } thread->ptrace_bps[0] = bp; ptrace_put_breakpoints(task); thread->dabr = data; return 0; } /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN; arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; ptrace_put_breakpoints(task); return PTR_ERR(bp); } ptrace_put_breakpoints(task); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ task->thread.dabr = data; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ /* As described above, it was assumed 3 bits were passed with the data * address, but we will assume only the mode bits will be passed * as to not cause alignment restrictions for DAC-based processors. */ /* DAC's hold the whole address without any mode flags */ task->thread.dac1 = data & ~0x3UL; if (task->thread.dac1 == 0) { dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, task->thread.dbcr1)) { task->thread.regs->msr &= ~MSR_DE; task->thread.dbcr0 &= ~DBCR0_IDM; } return 0; } /* Read or Write bits must be set */ if (!(data & 0x3UL)) return -EINVAL; /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */ task->thread.dbcr0 |= DBCR0_IDM; /* Check for write and read flags and set DBCR0 accordingly */ dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W); if (data & 0x1UL) dbcr_dac(task) |= DBCR_DAC1R; if (data & 0x2UL) dbcr_dac(task) |= DBCR_DAC1W; task->thread.regs->msr |= MSR_DE; #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ return 0; } /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. 
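 *
 * For example, if a tracer detaches immediately after a
 * PTRACE_SINGLESTEP, clearing MSR_SE/MSR_BE (or DBCR0_IC on BookE)
 * here keeps the child from trapping on its very next instruction
 * with no tracer left to consume the SIGTRAP.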
*/ void ptrace_disable(struct task_struct *child) { /* make sure the single step bit is not set. */ user_disable_single_step(child); } #ifdef CONFIG_PPC_ADV_DEBUG_REGS static long set_intruction_bp(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int slot; int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); if (dbcr_iac_range(child) & DBCR_IAC12MODE) slot2_in_use = 1; if (dbcr_iac_range(child) & DBCR_IAC34MODE) slot4_in_use = 1; if (bp_info->addr >= TASK_SIZE) return -EIO; if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) { /* Make sure range is valid. */ if (bp_info->addr2 >= TASK_SIZE) return -EIO; /* We need a pair of IAC regsisters */ if ((!slot1_in_use) && (!slot2_in_use)) { slot = 1; child->thread.iac1 = bp_info->addr; child->thread.iac2 = bp_info->addr2; child->thread.dbcr0 |= DBCR0_IAC1; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC12X; else dbcr_iac_range(child) |= DBCR_IAC12I; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if ((!slot3_in_use) && (!slot4_in_use)) { slot = 3; child->thread.iac3 = bp_info->addr; child->thread.iac4 = bp_info->addr2; child->thread.dbcr0 |= DBCR0_IAC3; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC34X; else dbcr_iac_range(child) |= DBCR_IAC34I; #endif } else return -ENOSPC; } else { /* We only need one. If possible leave a pair free in * case a range is needed later */ if (!slot1_in_use) { /* * Don't use iac1 if iac1-iac2 are free and either * iac3 or iac4 (but not both) are free */ if (slot2_in_use || (slot3_in_use == slot4_in_use)) { slot = 1; child->thread.iac1 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC1; goto out; } } if (!slot2_in_use) { slot = 2; child->thread.iac2 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC2; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if (!slot3_in_use) { slot = 3; child->thread.iac3 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC3; } else if (!slot4_in_use) { slot = 4; child->thread.iac4 = bp_info->addr; child->thread.dbcr0 |= DBCR0_IAC4; #endif } else return -ENOSPC; } out: child->thread.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot; } static int del_instruction_bp(struct task_struct *child, int slot) { switch (slot) { case 1: if ((child->thread.dbcr0 & DBCR0_IAC1) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) { /* address range - clear slots 1 & 2 */ child->thread.iac2 = 0; dbcr_iac_range(child) &= ~DBCR_IAC12MODE; } child->thread.iac1 = 0; child->thread.dbcr0 &= ~DBCR0_IAC1; break; case 2: if ((child->thread.dbcr0 & DBCR0_IAC2) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) /* used in a range */ return -EINVAL; child->thread.iac2 = 0; child->thread.dbcr0 &= ~DBCR0_IAC2; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case 3: if ((child->thread.dbcr0 & DBCR0_IAC3) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) { /* address range - clear slots 3 & 4 */ child->thread.iac4 = 0; dbcr_iac_range(child) &= ~DBCR_IAC34MODE; } child->thread.iac3 = 0; child->thread.dbcr0 &= ~DBCR0_IAC3; break; case 4: if ((child->thread.dbcr0 & DBCR0_IAC4) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) /* Used in a range */ return -EINVAL; child->thread.iac4 = 0; child->thread.dbcr0 &= ~DBCR0_IAC4; break; #endif default: return -EINVAL; } return 
0; } static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int byte_enable = (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT) & 0xf; int condition_mode = bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE; int slot; if (byte_enable && (condition_mode == 0)) return -EINVAL; if (bp_info->addr >= TASK_SIZE) return -EIO; if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) { slot = 1; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dbcr_dac(child) |= DBCR_DAC1R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC1W; child->thread.dac1 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { child->thread.dvc1 = (unsigned long)bp_info->condition_value; child->thread.dbcr2 |= ((byte_enable << DBCR2_DVC1BE_SHIFT) | (condition_mode << DBCR2_DVC1M_SHIFT)); } #endif #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { /* Both dac1 and dac2 are part of a range */ return -ENOSPC; #endif } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) { slot = 2; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dbcr_dac(child) |= DBCR_DAC2R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC2W; child->thread.dac2 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { child->thread.dvc2 = (unsigned long)bp_info->condition_value; child->thread.dbcr2 |= ((byte_enable << DBCR2_DVC2BE_SHIFT) | (condition_mode << DBCR2_DVC2M_SHIFT)); } #endif } else return -ENOSPC; child->thread.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot + 4; } static int del_dac(struct task_struct *child, int slot) { if (slot == 1) { if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) return -ENOENT; child->thread.dac1 = 0; dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE if (child->thread.dbcr2 & DBCR2_DAC12MODE) { child->thread.dac2 = 0; child->thread.dbcr2 &= ~DBCR2_DAC12MODE; } child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 child->thread.dvc1 = 0; #endif } else if (slot == 2) { if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) return -ENOENT; #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE if (child->thread.dbcr2 & DBCR2_DAC12MODE) /* Part of a range */ return -EINVAL; child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 child->thread.dvc2 = 0; #endif child->thread.dac2 = 0; dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); } else return -EINVAL; return 0; } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE static int set_dac_range(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK; /* We don't allow range watchpoints to be used with DVC */ if (bp_info->condition_mode) return -EINVAL; /* * Best effort to verify the address range. The user/supervisor bits * prevent trapping in kernel space, but let's fail on an obvious bad * range. The simple test on the mask is not fool-proof, and any * exclusive range will spill over into kernel space. */ if (bp_info->addr >= TASK_SIZE) return -EIO; if (mode == PPC_BREAKPOINT_MODE_MASK) { /* * dac2 is a bitmask. 
Don't allow a mask that makes a * kernel space address from a valid dac1 value */ if (~((unsigned long)bp_info->addr2) >= TASK_SIZE) return -EIO; } else { /* * For range breakpoints, addr2 must also be a valid address */ if (bp_info->addr2 >= TASK_SIZE) return -EIO; } if (child->thread.dbcr0 & (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) return -ENOSPC; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); child->thread.dac1 = bp_info->addr; child->thread.dac2 = bp_info->addr2; if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) child->thread.dbcr2 |= DBCR2_DAC12M; else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) child->thread.dbcr2 |= DBCR2_DAC12MX; else /* PPC_BREAKPOINT_MODE_MASK */ child->thread.dbcr2 |= DBCR2_DAC12MM; child->thread.regs->msr |= MSR_DE; return 5; } #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */ static long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { #ifndef CONFIG_PPC_ADV_DEBUG_REGS unsigned long dabr; #endif if (bp_info->version != 1) return -ENOTSUPP; #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * Check for invalid flags and combinations */ if ((bp_info->trigger_type == 0) || (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE | PPC_BREAKPOINT_TRIGGER_RW)) || (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) || (bp_info->condition_mode & ~(PPC_BREAKPOINT_CONDITION_MODE | PPC_BREAKPOINT_CONDITION_BE_ALL))) return -EINVAL; #if CONFIG_PPC_ADV_DEBUG_DVCS == 0 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; #endif if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) { if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) || (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) return -EINVAL; return set_intruction_bp(child, bp_info); } if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) return set_dac(child, bp_info); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE return set_dac_range(child, bp_info); #else return -EINVAL; #endif #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */ /* * We only support one data breakpoint */ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) return -EINVAL; if (child->thread.dabr) return -ENOSPC; if ((unsigned long)bp_info->addr >= TASK_SIZE) return -EIO; dabr = (unsigned long)bp_info->addr & ~7UL; dabr |= DABR_TRANSLATION; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) dabr |= DABR_DATA_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dabr |= DABR_DATA_WRITE; child->thread.dabr = dabr; return 1; #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ } static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS int rc; if (data <= 4) rc = del_instruction_bp(child, (int)data); else rc = del_dac(child, (int)data - 4); if (!rc) { if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, child->thread.dbcr1)) { child->thread.dbcr0 &= ~DBCR0_IDM; child->thread.regs->msr &= ~MSR_DE; } } return rc; #else if (data != 1) return -EINVAL; if (child->thread.dabr == 0) return -ENOENT; child->thread.dabr = 0; return 0; #endif } /* * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, * we mark them as obsolete now, they will be removed in a future version */ static long 
arch_ptrace_old(struct task_struct *child, long request, unsigned long addr, unsigned long data) { void __user *datavp = (void __user *) data; switch (request) { case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_GPR, 0, 32 * sizeof(long), datavp); case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_GPR, 0, 32 * sizeof(long), datavp); case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_FPR, 0, 32 * sizeof(double), datavp); case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_FPR, 0, 32 * sizeof(double), datavp); } return -EPERM; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret = -EPERM; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; switch (request) { /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long index, tmp; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = ptrace_get_reg(child, (int) index); } else { flush_fp_to_thread(child); tmp = ((unsigned long *)child->thread.fpr) [TS_FPRWIDTH * (index - PT_FPR0)]; } ret = put_user(tmp, datalp); break; } /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { flush_fp_to_thread(child); ((unsigned long *)child->thread.fpr) [TS_FPRWIDTH * (index - PT_FPR0)] = data; ret = 0; } break; } case PPC_PTRACE_GETHWDBGINFO: { struct ppc_debug_info dbginfo; dbginfo.version = 1; #ifdef CONFIG_PPC_ADV_DEBUG_REGS dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS; dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS; dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS; dbginfo.data_bp_alignment = 4; dbginfo.sizeof_condition = 4; dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE | PPC_DEBUG_FEATURE_INSN_BP_MASK; #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE | PPC_DEBUG_FEATURE_DATA_BP_MASK; #endif #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ dbginfo.num_instruction_bps = 0; dbginfo.num_data_bps = 1; dbginfo.num_condition_regs = 0; #ifdef CONFIG_PPC64 dbginfo.data_bp_alignment = 8; #else dbginfo.data_bp_alignment = 4; #endif dbginfo.sizeof_condition = 0; dbginfo.features = 0; #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ if (!access_ok(VERIFY_WRITE, datavp, sizeof(struct ppc_debug_info))) return -EFAULT; ret = __copy_to_user(datavp, &dbginfo, sizeof(struct ppc_debug_info)) ? -EFAULT : 0; break; } case PPC_PTRACE_SETHWDEBUG: { struct ppc_hw_breakpoint bp_info; if (!access_ok(VERIFY_READ, datavp, sizeof(struct ppc_hw_breakpoint))) return -EFAULT; ret = __copy_from_user(&bp_info, datavp, sizeof(struct ppc_hw_breakpoint)) ? 
			-EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, addr, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.dac1, datalp);
#else
		ret = put_user(child->thread.dabr, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	/* Old reverse args ptrace calls */
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		ret = arch_ptrace_old(child, request, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->gpr[0].
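		 *
		 * (A sketch of why this works, assuming the usual
		 * entry-path bounds check: -1L is never a valid syscall
		 * number, so the dispatch code's test against NR_syscalls
		 * fails and the task observes -ENOSYS, while a tracer
		 * stopped at the exit hook can still read GPR0 to recover
		 * the original request.)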
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

	if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
		if (!is_32bit_task())
			audit_syscall_entry(AUDIT_ARCH_PPC64,
					    regs->gpr[0],
					    regs->gpr[3], regs->gpr[4],
					    regs->gpr[5], regs->gpr[6]);
		else
#endif
			audit_syscall_entry(AUDIT_ARCH_PPC,
					    regs->gpr[0],
					    regs->gpr[3] & 0xffffffff,
					    regs->gpr[4] & 0xffffffff,
					    regs->gpr[5] & 0xffffffff,
					    regs->gpr[6] & 0xffffffff);
	}

	return ret ?: regs->gpr[0];
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit((regs->ccr & 0x10000000) ?
				   AUDITSC_FAILURE : AUDITSC_SUCCESS,
				   regs->result);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}
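/*
 * Example (hypothetical userspace sketch, not part of this file): arming
 * a one-shot write watchpoint through the legacy PTRACE_SET_DEBUGREG
 * interface implemented above.  On DABR-based processors the low three
 * bits of the data word are flag bits, so the watched address must be
 * 8-byte aligned and DABR_TRANSLATION must be set:
 *
 *	unsigned long dabr = (addr & ~7UL)
 *			   | DABR_TRANSLATION
 *			   | DABR_DATA_WRITE;
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
 *
 * Passing a data word of 0 clears the watchpoint again, and because the
 * exception is one-shot (see ptrace_triggered() above) the tracer must
 * re-arm it after every hit.
 */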
/* * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Author: Christoffer Dall <c.dall@virtualopensystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/kvm.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/mman.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/virt.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> #include <asm/kvm_psci.h> #ifdef REQUIRES_VIRT __asm__(".arch_extension virt"); #endif static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); static kvm_cpu_context_t __percpu *kvm_host_cpu_state; static unsigned long hyp_default_vectors; /* Per-CPU variable containing the currently running vcpu. */ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); /* The VMID used in the VTTBR */ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); static u8 kvm_next_vmid; static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) { BUG_ON(preemptible()); __get_cpu_var(kvm_arm_running_vcpu) = vcpu; } /** * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU. * Must be called from non-preemptible context */ struct kvm_vcpu *kvm_arm_get_running_vcpu(void) { BUG_ON(preemptible()); return __get_cpu_var(kvm_arm_running_vcpu); } /** * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. 
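 *
 * A minimal (hypothetical) consumer would look an entry up per CPU:
 *
 *	struct kvm_vcpu *vcpu = *per_cpu_ptr(kvm_get_running_vcpus(), cpu);
 *
 * and must itself guarantee that the slot cannot change underneath it
 * (e.g. by running in interrupt context on that CPU).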
*/ struct kvm_vcpu __percpu **kvm_get_running_vcpus(void) { return &kvm_arm_running_vcpu; } int kvm_arch_hardware_enable(void *garbage) { return 0; } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } void kvm_arch_hardware_disable(void *garbage) { } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_hardware_unsetup(void) { } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } void kvm_arch_sync_events(struct kvm *kvm) { } /** * kvm_arch_init_vm - initializes a VM data structure * @kvm: pointer to the KVM struct */ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret = 0; if (type) return -EINVAL; ret = kvm_alloc_stage2_pgd(kvm); if (ret) goto out_fail_alloc; ret = create_hyp_mappings(kvm, kvm + 1); if (ret) goto out_free_stage2_pgd; /* Mark the initial VMID generation invalid */ kvm->arch.vmid_gen = 0; return ret; out_free_stage2_pgd: kvm_free_stage2_pgd(kvm); out_fail_alloc: return ret; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; } /** * kvm_arch_destroy_vm - destroy the VM data structure * @kvm: pointer to the KVM struct */ void kvm_arch_destroy_vm(struct kvm *kvm) { int i; kvm_free_stage2_pgd(kvm); for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (kvm->vcpus[i]) { kvm_arch_vcpu_free(kvm->vcpus[i]); kvm->vcpus[i] = NULL; } } } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: r = vgic_present; break; case KVM_CAP_USER_MEMORY: case KVM_CAP_SYNC_MMU: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: case KVM_CAP_ONE_REG: case KVM_CAP_ARM_PSCI: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_ARM_SET_DEVICE_ADDR: r = 1; break; case KVM_CAP_NR_VCPUS: r = num_online_cpus(); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; default: r = kvm_arch_dev_ioctl_check_extension(ext); break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { } void kvm_arch_flush_shadow_all(struct kvm *kvm) { } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err; struct kvm_vcpu *vcpu; vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) { err = -ENOMEM; goto out; } err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto free_vcpu; err = create_hyp_mappings(vcpu, vcpu + 1); if (err) goto vcpu_uninit; return vcpu; vcpu_uninit: kvm_vcpu_uninit(vcpu); free_vcpu: kmem_cache_free(kvm_vcpu_cache, vcpu); out: return ERR_PTR(err); } int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { return 0; } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { kvm_mmu_free_memory_caches(vcpu); kvm_timer_vcpu_terminate(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_free(vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return 0; } int 
kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;

	/* Set up VGIC */
	ret = kvm_vgic_vcpu_init(vcpu);
	if (ret)
		return ret;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arm_set_running_vcpu(NULL);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v: The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts, or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM's VMID to check
 *
 * Returns true if there is a new generation of VMIDs being used.
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a VMID
 * for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm: The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);

		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
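		 *
		 * (Informal sketch of the invariant being restored: once
		 *
		 *	kvm->arch.vmid_gen == atomic64_read(&kvm_vmid_gen)
		 *
		 * holds again below, this VM's 8-bit VMID is unique among
		 * all guests that may still own live TLB entries.)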
*/ kvm_call_hyp(__kvm_flush_vm_context); } kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); kvm->arch.vmid = kvm_next_vmid; kvm_next_vmid++; /* update vttbr to be used with the new vmid */ pgd_phys = virt_to_phys(kvm->arch.pgd); vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; kvm->arch.vttbr |= vmid; spin_unlock(&kvm_vmid_lock); } static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) { if (likely(vcpu->arch.has_run_once)) return 0; vcpu->arch.has_run_once = true; /* * Initialize the VGIC before running a vcpu the first time on * this VM. */ if (irqchip_in_kernel(vcpu->kvm) && unlikely(!vgic_initialized(vcpu->kvm))) { int ret = kvm_vgic_init(vcpu->kvm); if (ret) return ret; } /* * Handle the "start in power-off" case by calling into the * PSCI code. */ if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; kvm_psci_call(vcpu); } return 0; } static void vcpu_pause(struct kvm_vcpu *vcpu) { wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); wait_event_interruptible(*wq, !vcpu->arch.pause); } /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer * @run: The kvm_run structure pointer used for userspace state exchange * * This function is called through the VCPU_RUN ioctl called from user space. It * will execute VM code in a loop until the time slice for the process is used * or some emulation is needed from user space in which case the function will * return with return value 0 and with the kvm_run structure filled in with the * required data for the requested emulation. */ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int ret; sigset_t sigsaved; /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ if (unlikely(vcpu->arch.target < 0)) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); if (ret) return ret; if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_handle_mmio_return(vcpu, vcpu->run); if (ret) return ret; } if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); ret = 1; run->exit_reason = KVM_EXIT_UNKNOWN; while (ret > 0) { /* * Check conditions before entering the guest */ cond_resched(); update_vttbr(vcpu->kvm); if (vcpu->arch.pause) vcpu_pause(vcpu); kvm_vgic_flush_hwstate(vcpu); kvm_timer_flush_hwstate(vcpu); local_irq_disable(); /* * Re-check atomic conditions */ if (signal_pending(current)) { ret = -EINTR; run->exit_reason = KVM_EXIT_INTR; } if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { local_irq_enable(); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); continue; } /************************************************************** * Enter the guest */ trace_kvm_entry(*vcpu_pc(vcpu)); kvm_guest_enter(); vcpu->mode = IN_GUEST_MODE; ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->arch.last_pcpu = smp_processor_id(); kvm_guest_exit(); trace_kvm_exit(*vcpu_pc(vcpu)); /* * We may have taken a host interrupt in HYP mode (ie * while executing the guest). This interrupt is still * pending, as we haven't serviced it yet! * * We're now back in SVC mode, with interrupts * disabled. Enabling the interrupts now will have * the effect of taking the interrupt again, in SVC * mode this time. 
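		 *
		 * Schematically, one possible interleaving:
		 *
		 *	guest runs -> host IRQ fires -> exit to SVC with the
		 *	IRQ left pending -> local_irq_enable() below -> the
		 *	IRQ is taken again and handled in SVC mode.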
*/ local_irq_enable(); /* * Back from guest *************************************************************/ kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); ret = handle_exit(vcpu, run, ret); } if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return ret; } static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) { int bit_index; bool set; unsigned long *ptr; if (number == KVM_ARM_IRQ_CPU_IRQ) bit_index = __ffs(HCR_VI); else /* KVM_ARM_IRQ_CPU_FIQ */ bit_index = __ffs(HCR_VF); ptr = (unsigned long *)&vcpu->arch.irq_lines; if (level) set = test_and_set_bit(bit_index, ptr); else set = test_and_clear_bit(bit_index, ptr); /* * If we didn't change anything, no need to wake up or kick other CPUs */ if (set == level) return 0; /* * The vcpu irq_lines field was updated, wake up sleeping VCPUs and * trigger a world-switch round on the running physical CPU to set the * virtual IRQ/FIQ fields in the HCR appropriately. */ kvm_vcpu_kick(vcpu); return 0; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status) { u32 irq = irq_level->irq; unsigned int irq_type, vcpu_idx, irq_num; int nrcpus = atomic_read(&kvm->online_vcpus); struct kvm_vcpu *vcpu = NULL; bool level = irq_level->level; irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); switch (irq_type) { case KVM_ARM_IRQ_TYPE_CPU: if (irqchip_in_kernel(kvm)) return -ENXIO; if (vcpu_idx >= nrcpus) return -EINVAL; vcpu = kvm_get_vcpu(kvm, vcpu_idx); if (!vcpu) return -EINVAL; if (irq_num > KVM_ARM_IRQ_CPU_FIQ) return -EINVAL; return vcpu_interrupt_line(vcpu, irq_num, level); case KVM_ARM_IRQ_TYPE_PPI: if (!irqchip_in_kernel(kvm)) return -ENXIO; if (vcpu_idx >= nrcpus) return -EINVAL; vcpu = kvm_get_vcpu(kvm, vcpu_idx); if (!vcpu) return -EINVAL; if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) return -EINVAL; return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); case KVM_ARM_IRQ_TYPE_SPI: if (!irqchip_in_kernel(kvm)) return -ENXIO; if (irq_num < VGIC_NR_PRIVATE_IRQS || irq_num > KVM_ARM_IRQ_GIC_MAX) return -EINVAL; return kvm_vgic_inject_irq(kvm, 0, irq_num, level); } return -EINVAL; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; switch (ioctl) { case KVM_ARM_VCPU_INIT: { struct kvm_vcpu_init init; if (copy_from_user(&init, argp, sizeof(init))) return -EFAULT; return kvm_vcpu_set_target(vcpu, &init); } case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) return kvm_arm_set_reg(vcpu, &reg); else return kvm_arm_get_reg(vcpu, &reg); } case KVM_GET_REG_LIST: { struct kvm_reg_list __user *user_list = argp; struct kvm_reg_list reg_list; unsigned n; if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; reg_list.n = kvm_arm_num_regs(vcpu); if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) return -EFAULT; if (n < reg_list.n) return -E2BIG; return kvm_arm_copy_reg_indices(vcpu, user_list->reg); } default: return -EINVAL; } } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return -EINVAL; } static int kvm_vm_ioctl_set_device_addr(struct kvm 
*kvm, struct kvm_arm_device_addr *dev_addr) { unsigned long dev_id, type; dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> KVM_ARM_DEVICE_ID_SHIFT; type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> KVM_ARM_DEVICE_TYPE_SHIFT; switch (dev_id) { case KVM_ARM_DEVICE_VGIC_V2: if (!vgic_present) return -ENXIO; return kvm_vgic_set_addr(kvm, type, dev_addr->addr); default: return -ENODEV; } } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; switch (ioctl) { case KVM_CREATE_IRQCHIP: { if (vgic_present) return kvm_vgic_create(kvm); else return -ENXIO; } case KVM_ARM_SET_DEVICE_ADDR: { struct kvm_arm_device_addr dev_addr; if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) return -EFAULT; return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); } default: return -EINVAL; } } static void cpu_init_hyp_mode(void *dummy) { unsigned long long boot_pgd_ptr; unsigned long long pgd_ptr; unsigned long hyp_stack_ptr; unsigned long stack_page; unsigned long vector_ptr; /* Switch from the HYP stub to our own HYP init vector */ __hyp_set_vectors(kvm_get_idmap_vector()); boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); } static int hyp_init_cpu_notify(struct notifier_block *self, unsigned long action, void *cpu) { switch (action) { case CPU_STARTING: case CPU_STARTING_FROZEN: cpu_init_hyp_mode(NULL); break; } return NOTIFY_OK; } static struct notifier_block hyp_init_cpu_nb = { .notifier_call = hyp_init_cpu_notify, }; /** * Inits Hyp-mode on all online CPUs */ static int init_hyp_mode(void) { int cpu; int err = 0; /* * Allocate Hyp PGD and setup Hyp identity mapping */ err = kvm_mmu_init(); if (err) goto out_err; /* * It is probably enough to obtain the default on one * CPU. It's unlikely to be different on the others. */ hyp_default_vectors = __hyp_get_vectors(); /* * Allocate stack pages for Hypervisor-mode */ for_each_possible_cpu(cpu) { unsigned long stack_page; stack_page = __get_free_page(GFP_KERNEL); if (!stack_page) { err = -ENOMEM; goto out_free_stack_pages; } per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; } /* * Map the Hyp-code called directly from the host */ err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); if (err) { kvm_err("Cannot map world-switch code\n"); goto out_free_mappings; } /* * Map the Hyp stack pages */ for_each_possible_cpu(cpu) { char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); if (err) { kvm_err("Cannot map hyp stack\n"); goto out_free_mappings; } } /* * Map the host CPU structures */ kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); if (!kvm_host_cpu_state) { err = -ENOMEM; kvm_err("Cannot allocate host CPU state\n"); goto out_free_mappings; } for_each_possible_cpu(cpu) { kvm_cpu_context_t *cpu_ctxt; cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1); if (err) { kvm_err("Cannot map host CPU state: %d\n", err); goto out_free_context; } } /* * Execute the init code on each CPU. 
*/ on_each_cpu(cpu_init_hyp_mode, NULL, 1); /* * Init HYP view of VGIC */ err = kvm_vgic_hyp_init(); if (err) goto out_free_context; #ifdef CONFIG_KVM_ARM_VGIC vgic_present = true; #endif /* * Init HYP architected timer support */ err = kvm_timer_hyp_init(); if (err) goto out_free_mappings; #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); #endif kvm_perf_init(); kvm_info("Hyp mode initialized successfully\n"); return 0; out_free_context: free_percpu(kvm_host_cpu_state); out_free_mappings: free_hyp_pgds(); out_free_stack_pages: for_each_possible_cpu(cpu) free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); out_err: kvm_err("error initializing Hyp mode: %d\n", err); return err; } static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); } /** * Initialize Hyp-mode and memory mappings on all CPUs. */ int kvm_arch_init(void *opaque) { int err; int ret, cpu; if (!is_hyp_mode_available()) { kvm_err("HYP mode not available\n"); return -ENODEV; } for_each_online_cpu(cpu) { smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); if (ret < 0) { kvm_err("Error, CPU %d not supported!\n", cpu); return -ENODEV; } } err = init_hyp_mode(); if (err) goto out_err; err = register_cpu_notifier(&hyp_init_cpu_nb); if (err) { kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); goto out_err; } kvm_coproc_table_init(); return 0; out_err: return err; } /* NOP: Compiling as a module not supported */ void kvm_arch_exit(void) { kvm_perf_teardown(); } static int arm_init(void) { int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); return rc; } module_init(arm_init);
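/*
 * Example (hypothetical userspace sketch, not part of this file): the
 * ioctl ordering that kvm_arch_vcpu_ioctl_run() enforces above.  A vcpu
 * whose target is still -1 cannot run, so KVM_ARM_VCPU_INIT must come
 * before the first KVM_RUN:
 *
 *	vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);  // sets vcpu->arch.target
 *	ioctl(vcpu_fd, KVM_RUN, 0);                // -ENOEXEC without INIT
 */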
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2002 Intel Corp. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement the state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * C. Robin <chris@hundredacre.ac.uk> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/random.h> /* for get_random_bytes */ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen); static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len); static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp); static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data); static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); /* Control chunk destructor */ static void sctp_control_release_owner(struct sk_buff *skb) { /*TODO: do memory release */ } static void sctp_control_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sk_buff *skb = chunk->skb; /* TODO: properly account for control chunks. * To do it right we'll need: * 1) endpoint if association isn't known. * 2) proper memory accounting. * * For now don't do anything for now. */ skb->sk = asoc ? asoc->base.sk : NULL; skb->destructor = sctp_control_release_owner; } /* What was the inbound interface for this chunk? 
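 * (I.e., map the chunk's skb back to the ifindex of the device it
 * arrived on via the address family's skb_iif() helper; 0 is returned
 * below when the interface cannot be determined.)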
*/ int sctp_chunk_iif(const struct sctp_chunk *chunk) { struct sctp_af *af; int iif = 0; af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); return iif; } /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 2: The ECN capable field is reserved for future use of * Explicit Congestion Notification. */ static const struct sctp_paramhdr ecap_param = { SCTP_PARAM_ECN_CAPABLE, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; static const struct sctp_paramhdr prsctp_param = { SCTP_PARAM_FWD_TSN_SUPPORT, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); } /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. Differs from sctp_init_cause in that it won't oops * if there isn't enough space in the op error chunk */ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); if (skb_tailroom(chunk->skb) < len) return -ENOSPC; chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(sctp_errhdr_t), &err); return 0; } /* 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two * endpoints. The format of the INIT chunk is shown below: * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initiate Tag | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Advertised Receiver Window Credit (a_rwnd) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Outbound Streams | Number of Inbound Streams | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initial TSN | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / Optional/Variable-Length Parameters / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * The INIT chunk contains the following parameters. Unless otherwise * noted, each parameter MUST only be included once in the INIT chunk. 
* * Fixed Parameters Status * ---------------------------------------------- * Initiate Tag Mandatory * Advertised Receiver Window Credit Mandatory * Number of Outbound Streams Mandatory * Number of Inbound Streams Mandatory * Initial TSN Mandatory * * Variable Parameters Status Type Value * ------------------------------------------------------------- * IPv4 Address (Note 1) Optional 5 * IPv6 Address (Note 1) Optional 6 * Cookie Preservative Optional 9 * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) * Host Name Address (Note 3) Optional 11 * Supported Address Types (Note 4) Optional 12 */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, gfp_t gfp, int vparam_len) { struct net *net = sock_net(asoc->base.sk); struct sctp_endpoint *ep = asoc->ep; sctp_inithdr_t init; union sctp_params addrs; size_t chunksize; struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; struct sctp_sock *sp; sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 1: The INIT chunks can contain multiple addresses that * can be IPv4 and/or IPv6 in any combination. */ retval = NULL; /* Convert the provided bind address list to raw format. */ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); init.init_tag = htonl(asoc->c.my_vtag); init.a_rwnd = htonl(asoc->rwnd); init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); init.initial_tsn = htonl(asoc->c.initial_tsn); /* How many address types are needed? */ sp = sctp_sk(asoc->base.sk); num_types = sp->pf->supported_addrs(sp, types); chunksize = sizeof(init) + addrs_len; chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); chunksize += sizeof(ecap_param); if (net->sctp.prsctp_enable) chunksize += sizeof(prsctp_param); /* ADDIP: Section 4.2.7: * An implementation supporting this extension [ADDIP] MUST list * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and * INIT-ACK parameters. */ if (net->sctp.addip_enable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); chunksize += vparam_len; /* Account for AUTH related parameters */ if (ep->auth_enable) { /* Add random parameter length*/ chunksize += sizeof(asoc->c.auth_random); /* Add HMACS parameter length if any were defined */ auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; /* Add CHUNKS parameter length */ auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } /* If we have any extensions to report, account for that */ if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host * Name address parameter. Moreover, the sender of the INIT * MUST NOT combine any other address types with the Host Name * address in the INIT. The receiver of INIT MUST ignore any * other address types if the Host Name address parameter is * present in the received INIT chunk. 
 *
 * PLEASE DO NOT FIXME [This version does not support Host Name.]
 */
	retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
	if (!retval)
		goto nodata;

	retval->subh.init_hdr =
		sctp_addto_chunk(retval, sizeof(init), &init);
	retval->param_hdr.v =
		sctp_addto_chunk(retval, addrs_len, addrs.v);

	/* RFC 2960 3.3.2 Initiation (INIT) (1)
	 *
	 * Note 4: This parameter, when present, specifies all the
	 * address types the sending endpoint can support. The absence
	 * of this parameter indicates that the sending endpoint can
	 * support any address type.
	 */
	sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES;
	sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types));
	sctp_addto_chunk(retval, sizeof(sat), &sat);
	sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);

	sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);

	/* Add the supported extensions parameter.  Be nice and add this
	 * first before adding the parameters for the extensions themselves
	 */
	if (num_ext) {
		ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
		ext_param.param_hdr.length =
			htons(sizeof(sctp_supported_ext_param_t) + num_ext);
		sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t),
				 &ext_param);
		sctp_addto_param(retval, num_ext, extensions);
	}

	if (net->sctp.prsctp_enable)
		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

	if (sp->adaptation_ind) {
		aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
		aiparam.param_hdr.length = htons(sizeof(aiparam));
		aiparam.adaptation_ind = htonl(sp->adaptation_ind);
		sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
	}

	/* Add SCTP-AUTH chunks to the parameter list */
	if (ep->auth_enable) {
		sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
				 asoc->c.auth_random);
		if (auth_hmacs)
			sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
					 auth_hmacs);
		if (auth_chunks)
			sctp_addto_chunk(retval, ntohs(auth_chunks->length),
					 auth_chunks);
	}
nodata:
	kfree(addrs.v);
	return retval;
}

struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk,
				      gfp_t gfp, int unkparam_len)
{
	sctp_inithdr_t initack;
	struct sctp_chunk *retval;
	union sctp_params addrs;
	struct sctp_sock *sp;
	int addrs_len;
	sctp_cookie_param_t *cookie;
	int cookie_len;
	size_t chunksize;
	sctp_adaptation_ind_param_t aiparam;
	sctp_supported_ext_param_t ext_param;
	int num_ext = 0;
	__u8 extensions[3];
	sctp_paramhdr_t *auth_chunks = NULL,
			*auth_hmacs = NULL,
			*auth_random = NULL;

	retval = NULL;

	/* Note: there may be no addresses to embed. */
	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);

	initack.init_tag		= htonl(asoc->c.my_vtag);
	initack.a_rwnd			= htonl(asoc->rwnd);
	initack.num_outbound_streams	= htons(asoc->c.sinit_num_ostreams);
	initack.num_inbound_streams	= htons(asoc->c.sinit_max_instreams);
	initack.initial_tsn		= htonl(asoc->c.initial_tsn);

	/* FIXME: We really ought to build the cookie right
	 * into the packet instead of allocating more fresh memory.
	 */
	cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len,
				  addrs.v, addrs_len);
	if (!cookie)
		goto nomem_cookie;

	/* Calculate the total size of allocation, include the reserved
	 * space for reporting unknown parameters if it is specified.
	 */
	sp = sctp_sk(asoc->base.sk);
	chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;

	/* Tell peer that we'll do ECN only if peer advertised such cap.
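	 * (asoc->peer.ecn_capable would have been set while the peer's
	 * INIT was being processed - see the SCTP_PARAM_ECN_CAPABLE
	 * handling in sctp_process_param(), declared near the top of
	 * this file.)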
*/ if (asoc->peer.ecn_capable) chunksize += sizeof(ecap_param); if (asoc->peer.prsctp_capable) chunksize += sizeof(prsctp_param); if (asoc->peer.asconf_capable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); if (asoc->peer.auth_capable) { auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; chunksize += ntohs(auth_random->length); auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* Now allocate and fill out the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) goto nomem_chunk; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * * [INIT ACK back to where the INIT came from.] */ retval->transport = chunk->transport; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } if (asoc->peer.auth_capable) { sctp_addto_chunk(retval, ntohs(auth_random->length), auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; nomem_chunk: kfree(cookie); nomem_cookie: kfree(addrs.v); return retval; } /* 3.3.11 Cookie Echo (COOKIE ECHO) (10): * * This chunk is used only during the initialization of an association. * It is sent by the initiator of an association to its peer to complete * the initialization process. This chunk MUST precede any DATA chunk * sent within the association, but MAY be bundled with one or more DATA * chunks in the same packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 10 |Chunk Flags | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * / Cookie / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bit * * Set to zero on transmit and ignored on receipt. 
* * Length: 16 bits (unsigned integer) * * Set to the size of the chunk in bytes, including the 4 bytes of * the chunk header and the size of the Cookie. * * Cookie: variable size * * This field must contain the exact cookie received in the * State Cookie parameter from the previous INIT ACK. * * An implementation SHOULD make the cookie as small as possible * to insure interoperability. */ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; void *cookie; int cookie_len; cookie = asoc->peer.cookie; cookie_len = asoc->peer.cookie_len; /* Build a cookie echo chunk. */ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); if (!retval) goto nodata; retval->subh.cookie_hdr = sctp_addto_chunk(retval, cookie_len, cookie); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ECHO back to where the INIT ACK came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): * * This chunk is used only during the initialization of an * association. It is used to acknowledge the receipt of a COOKIE * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent * within the association, but MAY be bundled with one or more DATA * chunks or SACK chunk in the same SCTP packet. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 11 |Chunk Flags | Length = 4 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bits * * Set to zero on transmit and ignored on receipt. */ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ACK back to where the COOKIE ECHO came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* * Appendix A: Explicit Congestion Notification: * CWR: * * RFC 2481 details a specific bit for a sender to send in the header of * its next outbound TCP segment to indicate to its peer that it has * reduced its congestion window. This is termed the CWR bit. For * SCTP the same indication is made by including the CWR chunk. * This chunk contains one data element, i.e. the TSN number that * was sent in the ECNE chunk. This element represents the lowest * TSN number in the datagram that was originally marked with the * CE bit. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Lowest TSN Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Note: The CWR is considered a Control chunk. 
*/ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_cwrhdr_t cwr; cwr.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, sizeof(sctp_cwrhdr_t)); if (!retval) goto nodata; retval->subh.ecn_cwr_hdr = sctp_addto_chunk(retval, sizeof(cwr), &cwr); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report a reduced congestion window back to where the ECNE * came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Make an ECNE chunk. This is a congestion experienced report. */ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn) { struct sctp_chunk *retval; sctp_ecnehdr_t ecne; ecne.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, sizeof(sctp_ecnehdr_t)); if (!retval) goto nodata; retval->subh.ecne_hdr = sctp_addto_chunk(retval, sizeof(ecne), &ecne); nodata: return retval; } /* Make a DATA chunk for the given association from the provided * parameters. However, do not populate the data payload. */ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int data_len, __u8 flags, __u16 ssn) { struct sctp_chunk *retval; struct sctp_datahdr dp; int chunk_len; /* We assign the TSN as LATE as possible, not here when * creating the chunk. */ dp.tsn = 0; dp.stream = htons(sinfo->sinfo_stream); dp.ppid = sinfo->sinfo_ppid; /* Set the flags for an unordered send. */ if (sinfo->sinfo_flags & SCTP_UNORDERED) { flags |= SCTP_DATA_UNORDERED; dp.ssn = 0; } else dp.ssn = htons(ssn); chunk_len = sizeof(dp) + data_len; retval = sctp_make_data(asoc, flags, chunk_len); if (!retval) goto nodata; retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); nodata: return retval; } /* Create a selective ackowledgement (SACK) for the given * association. This reports on which TSN's we've seen to date, * including duplicates and gaps. */ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_sackhdr sack; int len; __u32 ctsn; __u16 num_gabs, num_dup_tsns; struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; struct sctp_transport *trans; memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); /* How much room is needed in the chunk? */ num_gabs = sctp_tsnmap_num_gabs(map, gabs); num_dup_tsns = sctp_tsnmap_num_dups(map); /* Initialize the SACK header. */ sack.cum_tsn_ack = htonl(ctsn); sack.a_rwnd = htonl(asoc->a_rwnd); sack.num_gap_ack_blocks = htons(num_gabs); sack.num_dup_tsns = htons(num_dup_tsns); len = sizeof(sack) + sizeof(struct sctp_gap_ack_block) * num_gabs + sizeof(__u32) * num_dup_tsns; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) 
to the same destination transport * address from which it received the DATA or control chunk to * which it is replying. This rule should also be followed if * the endpoint is bundling DATA chunks together with the * reply chunk. * * However, when acknowledging multiple DATA chunks received * in packets from different source addresses in a single * SACK, the SACK chunk may be transmitted to one of the * destination transport addresses from which the DATA or * control chunks being acknowledged were received. * * [BUG: We do not implement the following paragraph. * Perhaps we should remember the last transport we used for a * SACK and avoid that (if possible) if we have seen any * duplicates. --piggy] * * When a receiver of a duplicate DATA chunk sends a SACK to a * multi- homed endpoint it MAY be beneficial to vary the * destination address and not use the source address of the * DATA chunk. The reason being that receiving a duplicate * from a multi-homed endpoint might indicate that the return * path (as specified in the source address of the DATA chunk) * for the SACK is broken. * * [Send to the address from which we last received a DATA chunk.] */ retval->transport = asoc->peer.last_data_from; retval->subh.sack_hdr = sctp_addto_chunk(retval, sizeof(sack), &sack); /* Add the gap ack block information. */ if (num_gabs) sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, gabs); /* Add the duplicate TSN information. */ if (num_dup_tsns) { aptr->stats.idupchunks += num_dup_tsns; sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, sctp_tsnmap_get_dups(map)); } /* Once we have a sack generated, check to see what our sack * generation is, if its 0, reset the transports to 0, and reset * the association generation to 1 * * The idea is that zero is never used as a valid generation for the * association so no transport will match after a wrap event like this, * Until the next sack */ if (++aptr->peer.sack_generation == 0) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) trans->sack_generation = 0; aptr->peer.sack_generation = 1; } nodata: return retval; } /* Make a SHUTDOWN chunk. */ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_shutdownhdr_t shut; __u32 ctsn; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, sizeof(sctp_shutdownhdr_t)); if (!retval) goto nodata; retval->subh.shutdown_hdr = sctp_addto_chunk(retval, sizeof(shut), &shut); if (chunk) retval->transport = chunk->transport; nodata: return retval; } struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ACK back to where the SHUTDOWN came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } struct sctp_chunk *sctp_make_shutdown_complete( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association (vtag will be * reflected) */ flags |= asoc ? 
0 : SCTP_CHUNK_FLAG_T; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK * came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Create an ABORT. Note that we set the T bit if we have no * association, except when responding to an INIT (sctpimpguide 2.41). */ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const size_t hint) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association and 'chunk' is not * an INIT (vtag will be reflected). */ if (!asoc) { if (chunk && chunk->chunk_hdr && chunk->chunk_hdr->type == SCTP_CID_INIT) flags = 0; else flags = SCTP_CHUNK_FLAG_T; } retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Helper to create ABORT with a NO_USER_DATA error. */ struct sctp_chunk *sctp_make_abort_no_data( const struct sctp_association *asoc, const struct sctp_chunk *chunk, __u32 tsn) { struct sctp_chunk *retval; __be32 payload; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + sizeof(tsn)); if (!retval) goto no_mem; /* Put the tsn back into network byte order. */ payload = htonl(tsn); sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (chunk) retval->transport = chunk->transport; no_mem: return retval; } /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, const struct msghdr *msg, size_t paylen) { struct sctp_chunk *retval; void *payload = NULL; int err; retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ payload = kmalloc(paylen, GFP_KERNEL); if (!payload) goto err_payload; err = memcpy_fromiovec(payload, msg->msg_iov, paylen); if (err < 0) goto err_copy; } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); sctp_addto_chunk(retval, paylen, payload); if (paylen) kfree(payload); return retval; err_copy: kfree(payload); err_payload: sctp_chunk_free(retval); retval = NULL; err_chunk: return retval; } /* Append bytes to the end of a parameter. Will panic if chunk is not big * enough. */ static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) { void *target; int chunklen = ntohs(chunk->chunk_hdr->length); target = skb_put(chunk->skb, len); if (data) memcpy(target, data, len); else memset(target, 0, len); /* Adjust the chunk length field. 
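 * Note that, unlike sctp_addto_chunk(), no padding was inserted above,
 * so the chunk length grows by exactly 'len' bytes.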
*/ chunk->chunk_hdr->length = htons(chunklen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */ struct sctp_chunk *sctp_make_abort_violation( const struct sctp_association *asoc, const struct sctp_chunk *chunk, const __u8 *payload, const size_t paylen) { struct sctp_chunk *retval; struct sctp_paramhdr phdr; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen + sizeof(sctp_paramhdr_t)); if (!retval) goto end; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen + sizeof(sctp_paramhdr_t)); phdr.type = htons(chunk->chunk_hdr->type); phdr.length = chunk->chunk_hdr->length; sctp_addto_chunk(retval, paylen, payload); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); end: return retval; } struct sctp_chunk *sctp_make_violation_paramlen( const struct sctp_association *asoc, const struct sctp_chunk *chunk, struct sctp_paramhdr *param) { struct sctp_chunk *retval; static const char error[] = "The following parameter had invalid length:"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + sizeof(sctp_paramhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error) + sizeof(sctp_paramhdr_t)); sctp_addto_chunk(retval, sizeof(error), error); sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); nodata: return retval; } struct sctp_chunk *sctp_make_violation_max_retrans( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; static const char error[] = "Association exceeded its max_retans count"; size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) goto nodata; sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error)); sctp_addto_chunk(retval, sizeof(error), error); nodata: return retval; } /* Make a HEARTBEAT chunk. */ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport) { struct sctp_chunk *retval; sctp_sender_hb_info_t hbinfo; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); if (!retval) goto nodata; hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; hbinfo.hb_nonce = transport->hb_nonce; /* Cast away the 'const', as this is just telling the chunk * what transport it belongs to. */ retval->transport = (struct sctp_transport *) transport; retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo), &hbinfo); nodata: return retval; } struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const void *payload, const size_t paylen) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); if (!retval) goto nodata; retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [HBACK back to where the HEARTBEAT came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk with the specified space reserved. 
* This routine can be used for containing multiple causes in the chunk. */ static struct sctp_chunk *sctp_make_op_error_space( const struct sctp_association *asoc, const struct sctp_chunk *chunk, size_t size) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, sizeof(sctp_errhdr_t) + size); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Create an Operation Error chunk of a fixed size, * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) * This is a helper function to allocate an error chunk for * for those invalid parameter codes in which we may not want * to report all the errors, if the incoming chunk is large */ static inline struct sctp_chunk *sctp_make_op_error_fixed( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { size_t size = asoc ? asoc->pathmtu : 0; if (!size) size = SCTP_DEFAULT_MAXSEGMENT; return sctp_make_op_error_space(asoc, chunk, size); } /* Create an Operation Error chunk. */ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __be16 cause_code, const void *payload, size_t paylen, size_t reserve_tail) { struct sctp_chunk *retval; retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail); if (!retval) goto nodata; sctp_init_cause(retval, cause_code, paylen + reserve_tail); sctp_addto_chunk(retval, paylen, payload); if (reserve_tail) sctp_addto_param(retval, reserve_tail, NULL); nodata: return retval; } struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_hmac *hmac_desc; struct sctp_authhdr auth_hdr; __u8 *hmac; /* Get the first hmac that the peer told us to use */ hmac_desc = sctp_auth_asoc_get_hmac(asoc); if (unlikely(!hmac_desc)) return NULL; retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); if (!retval) return NULL; auth_hdr.hmac_id = htons(hmac_desc->hmac_id); auth_hdr.shkey_id = htons(asoc->active_key_id); retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), &auth_hdr); hmac = skb_put(retval->skb, hmac_desc->hmac_len); memset(hmac, 0, hmac_desc->hmac_len); /* Adjust the chunk header to include the empty MAC */ retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); retval->chunk_end = skb_tail_pointer(retval->skb); return retval; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* Turn an skb into a chunk. * FIXME: Eventually move the structure directly inside the skb->cb[]. * * sctpimpguide-05.txt Section 2.8.2 * M1) Each time a new DATA chunk is transmitted * set the 'TSN.Missing.Report' count for that TSN to 0. The * 'TSN.Missing.Report' count will be used to determine missing chunks * and when to fast retransmit. 
* */ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, const struct sctp_association *asoc, struct sock *sk) { struct sctp_chunk *retval; retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); if (!retval) goto nodata; if (!sk) pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); INIT_LIST_HEAD(&retval->list); retval->skb = skb; retval->asoc = (struct sctp_association *)asoc; retval->singleton = 1; retval->fast_retransmit = SCTP_CAN_FRTX; /* Polish the bead hole. */ INIT_LIST_HEAD(&retval->transmitted_list); INIT_LIST_HEAD(&retval->frag_list); SCTP_DBG_OBJCNT_INC(chunk); atomic_set(&retval->refcnt, 1); nodata: return retval; } /* Set chunk->source and dest based on the IP header in chunk->skb. */ void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, union sctp_addr *dest) { memcpy(&chunk->source, src, sizeof(union sctp_addr)); memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); } /* Extract the source address from a chunk. */ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) { /* If we have a known transport, use that. */ if (chunk->transport) { return &chunk->transport->ipaddr; } else { /* Otherwise, extract it from the IP header. */ return &chunk->source; } } /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; struct sk_buff *skb; struct sock *sk; /* No need to allocate LL here, as this is only a chunk. */ skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), GFP_ATOMIC); if (!skb) goto nodata; /* Make room for the chunk header. */ chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); chunk_hdr->type = type; chunk_hdr->flags = flags; chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); sk = asoc ? asoc->base.sk : NULL; retval = sctp_chunkify(skb, asoc, sk); if (!retval) { kfree_skb(skb); goto nodata; } retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); /* Determine if the chunk needs to be authenticated */ if (sctp_auth_send_cid(type, asoc)) retval->auth = 1; return retval; nodata: return NULL; } static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen) { return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); } static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); if (chunk) sctp_control_set_owner_w(chunk); return chunk; } /* Release the memory occupied by a chunk. */ static void sctp_chunk_destroy(struct sctp_chunk *chunk) { BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); consume_skb(chunk->skb); consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); } /* Possibly, free the chunk. */ void sctp_chunk_free(struct sctp_chunk *chunk) { /* Release our reference on the message tracker. */ if (chunk->msg) sctp_datamsg_put(chunk->msg); sctp_chunk_put(chunk); } /* Grab a reference to the chunk. */ void sctp_chunk_hold(struct sctp_chunk *ch) { atomic_inc(&ch->refcnt); } /* Release a reference to the chunk. */ void sctp_chunk_put(struct sctp_chunk *ch) { if (atomic_dec_and_test(&ch->refcnt)) sctp_chunk_destroy(ch); } /* Append bytes to the end of a chunk. 
Will panic if chunk is not big * enough. */ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) { void *target; void *padding; int chunklen = ntohs(chunk->chunk_hdr->length); int padlen = WORD_ROUND(chunklen) - chunklen; padding = skb_put(chunk->skb, padlen); target = skb_put(chunk->skb, len); memset(padding, 0, padlen); memcpy(target, data, len); /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(chunklen + padlen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient * space in the chunk */ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, int len, const void *data) { if (skb_tailroom(chunk->skb) >= len) return sctp_addto_chunk(chunk, len, data); else return NULL; } /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. */ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data) { __u8 *target; int err = 0; /* Make room in chunk for data. */ target = skb_put(chunk->skb, len); /* Copy data (whole iovec) into chunk */ if ((err = memcpy_fromiovecend(target, data, off, len))) goto out; /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); out: return err; } /* Helper function to assign a TSN if needed. This assumes that both * the data_hdr and association have already been assigned. */ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) { struct sctp_datamsg *msg; struct sctp_chunk *lchunk; struct sctp_stream *stream; __u16 ssn; __u16 sid; if (chunk->has_ssn) return; /* All fragments will be on the same stream */ sid = ntohs(chunk->subh.data_hdr->stream); stream = &chunk->asoc->ssnmap->out; /* Now assign the sequence number to the entire message. * All fragments must have the same stream sequence number. */ msg = chunk->msg; list_for_each_entry(lchunk, &msg->chunks, frag_list) { if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { ssn = 0; } else { if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) ssn = sctp_ssn_next(stream, sid); else ssn = sctp_ssn_peek(stream, sid); } lchunk->subh.data_hdr->ssn = htons(ssn); lchunk->has_ssn = 1; } } /* Helper function to assign a TSN if needed. This assumes that both * the data_hdr and association have already been assigned. */ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) { if (!chunk->has_tsn) { /* This is the last possible instant to * assign a TSN. */ chunk->subh.data_hdr->tsn = htonl(sctp_association_get_next_tsn(chunk->asoc)); chunk->has_tsn = 1; } } /* Create a CLOSED association to use with an incoming packet. */ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_association *asoc; struct sk_buff *skb; sctp_scope_t scope; struct sctp_af *af; /* Create the bare association. */ scope = sctp_scope(sctp_source(chunk)); asoc = sctp_association_new(ep, ep->base.sk, scope, gfp); if (!asoc) goto nodata; asoc->temp = 1; skb = chunk->skb; /* Create an entry for the source address of the packet. */ af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version)); if (unlikely(!af)) goto fail; af->from_skb(&asoc->c.peer_addr, skb, 1); nodata: return asoc; fail: sctp_association_free(asoc); return NULL; } /* Build a cookie representing asoc. * This INCLUDES the param header needed to put the cookie in the INIT ACK. 
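 * The cookie body is the association's sctp_cookie followed by the peer's
 * complete INIT chunk and the raw local address list; when the socket has
 * an HMAC transform configured, the body is signed with the endpoint's
 * secret key.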
*/ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len) { sctp_cookie_param_t *retval; struct sctp_signed_cookie *cookie; struct scatterlist sg; int headersize, bodysize; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_paramhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = sizeof(struct sctp_cookie) + ntohs(init_chunk->chunk_hdr->length) + addrs_len; /* Pad out the cookie to a multiple to make the signature * functions simpler to write. */ if (bodysize % SCTP_COOKIE_MULTIPLE) bodysize += SCTP_COOKIE_MULTIPLE - (bodysize % SCTP_COOKIE_MULTIPLE); *cookie_len = headersize + bodysize; /* Clear this memory since we are sending this data structure * out on the network. */ retval = kzalloc(*cookie_len, GFP_ATOMIC); if (!retval) goto nodata; cookie = (struct sctp_signed_cookie *) retval->body; /* Set up the parameter header. */ retval->p.type = SCTP_PARAM_STATE_COOKIE; retval->p.length = htons(*cookie_len); /* Copy the cookie part of the association itself. */ cookie->c = asoc->c; /* Save the raw address list length in the cookie. */ cookie->c.raw_addr_list_len = addrs_len; /* Remember PR-SCTP capability. */ cookie->c.prsctp_capable = asoc->peer.prsctp_capable; /* Save adaptation indication in the cookie. */ cookie->c.adaptation_ind = asoc->peer.adaptation_ind; /* Set an expiration time for the cookie. */ cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get()); /* Copy the peer's init packet. */ memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, ntohs(init_chunk->chunk_hdr->length)); /* Copy the raw local address list of the association. */ memcpy((__u8 *)&cookie->c.peer_init[0] + ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); if (sctp_sk(ep->base.sk)->hmac) { struct hash_desc desc; /* Sign the message. */ sg_init_one(&sg, &cookie->c, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) goto free_cookie; } return retval; free_cookie: kfree(retval); nodata: *cookie_len = 0; return NULL; } /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ struct sctp_association *sctp_unpack_cookie( const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp, int *error, struct sctp_chunk **errp) { struct sctp_association *retval = NULL; struct sctp_signed_cookie *cookie; struct sctp_cookie *bear_cookie; int headersize, bodysize, fixed_size; __u8 *digest = ep->digest; struct scatterlist sg; unsigned int len; sctp_scope_t scope; struct sk_buff *skb = chunk->skb; ktime_t kt; struct hash_desc desc; /* Header size is static data prior to the actual cookie, including * any padding. */ headersize = sizeof(sctp_chunkhdr_t) + (sizeof(struct sctp_signed_cookie) - sizeof(struct sctp_cookie)); bodysize = ntohs(chunk->chunk_hdr->length) - headersize; fixed_size = headersize + sizeof(struct sctp_cookie); /* Verify that the chunk looks like it even has a cookie. * There must be enough room for our cookie and our peer's * INIT chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len < fixed_size + sizeof(struct sctp_chunkhdr)) goto malformed; /* Verify that the cookie has been padded out. 
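 * (sctp_pack_cookie() rounds the body up to a multiple of
 * SCTP_COOKIE_MULTIPLE, so any other size indicates a malformed chunk.)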
*/ if (bodysize % SCTP_COOKIE_MULTIPLE) goto malformed; /* Process the cookie. */ cookie = chunk->subh.cookie_hdr; bear_cookie = &cookie->c; if (!sctp_sk(ep->base.sk)->hmac) goto no_hmac; /* Check the signature. */ sg_init_one(&sg, bear_cookie, bodysize); desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; memset(digest, 0x00, SCTP_SIGNATURE_SIZE); if (crypto_hash_setkey(desc.tfm, ep->secret_key, sizeof(ep->secret_key)) || crypto_hash_digest(&desc, &sg, bodysize, digest)) { *error = -SCTP_IERROR_NOMEM; goto fail; } if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { *error = -SCTP_IERROR_BAD_SIG; goto fail; } no_hmac: /* IG Section 2.35.2: * 3) Compare the port numbers and the verification tag contained * within the COOKIE ECHO chunk to the actual port numbers and the * verification tag within the SCTP common header of the received * packet. If these values do not match the packet MUST be silently * discarded, */ if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) { *error = -SCTP_IERROR_BAD_TAG; goto fail; } if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port || ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { *error = -SCTP_IERROR_BAD_PORTS; goto fail; } /* Check to see if the cookie is stale. If there is already * an association, there is no need to check cookie's expiration * for init collision case of lost COOKIE ACK. * If skb has been timestamped, then use the stamp, otherwise * use current time. This introduces a small possibility that * that a cookie may be considered expired, but his would only slow * down the new association establishment instead of every packet. */ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) kt = skb_get_ktime(skb); else kt = ktime_get(); if (!asoc && ktime_before(bear_cookie->expiration, kt)) { /* * Section 3.3.10.3 Stale Cookie Error (3) * * Cause of error * --------------- * Stale Cookie Error: Indicates the receipt of a valid State * Cookie that has expired. */ len = ntohs(chunk->chunk_hdr->length); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); __be32 n = htonl(usecs); sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, sizeof(n)); sctp_addto_chunk(*errp, sizeof(n), &n); *error = -SCTP_IERROR_STALE_COOKIE; } else *error = -SCTP_IERROR_NOMEM; goto fail; } /* Make a new base association. */ scope = sctp_scope(sctp_source(chunk)); retval = sctp_association_new(ep, ep->base.sk, scope, gfp); if (!retval) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Set up our peer's port number. */ retval->peer.port = ntohs(chunk->sctp_hdr->source); /* Populate the association from the cookie. */ memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie, GFP_ATOMIC) < 0) { *error = -SCTP_IERROR_NOMEM; goto fail; } /* Also, add the destination address. */ if (list_empty(&retval->base.bind_addr.address_list)) { sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, SCTP_ADDR_SRC, GFP_ATOMIC); } retval->next_tsn = retval->c.initial_tsn; retval->ctsn_ack_point = retval->next_tsn - 1; retval->addip_serial = retval->c.initial_tsn; retval->adv_peer_ack_point = retval->ctsn_ack_point; retval->peer.prsctp_capable = retval->c.prsctp_capable; retval->peer.adaptation_ind = retval->c.adaptation_ind; /* The INIT stuff will be done by the side effects. */ return retval; fail: if (retval) sctp_association_free(retval); return NULL; malformed: /* Yikes! 
The packet is either corrupt or deliberately * malformed. */ *error = -SCTP_IERROR_MALFORMED; goto fail; } /******************************************************************** * 3rd Level Abstractions ********************************************************************/ struct __sctp_missing { __be32 num_missing; __be16 type; } __packed; /* * Report a missing mandatory parameter. */ static int sctp_process_missing_param(const struct sctp_association *asoc, sctp_param_t paramtype, struct sctp_chunk *chunk, struct sctp_chunk **errp) { struct __sctp_missing report; __u16 len; len = WORD_ROUND(sizeof(report)); /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { report.num_missing = htonl(1); report.type = paramtype; sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, sizeof(report)); sctp_addto_chunk(*errp, sizeof(report), &report); } /* Stop processing this chunk. */ return 0; } /* Report an Invalid Mandatory Parameter. */ static int sctp_process_inv_mandatory(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* Invalid Mandatory Parameter Error has no payload. */ if (!*errp) *errp = sctp_make_op_error_space(asoc, chunk, 0); if (*errp) sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); /* Stop processing this chunk. */ return 0; } static int sctp_process_inv_paramlength(const struct sctp_association *asoc, struct sctp_paramhdr *param, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { /* This is a fatal error. Any accumulated non-fatal errors are * not reported. */ if (*errp) sctp_chunk_free(*errp); /* Create an error chunk and fill it in with our payload. */ *errp = sctp_make_violation_paramlen(asoc, chunk, param); return 0; } /* Do not attempt to handle the HOST_NAME parm. However, do * send back an indicator to the peer. */ static int sctp_process_hn_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { __u16 len = ntohs(param.p->length); /* Processing of the HOST_NAME parameter will generate an * ABORT. If we've accumulated any non-fatal errors, they * would be unrecognized parameters and we should not include * them in the ABORT. */ if (*errp) sctp_chunk_free(*errp); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len); sctp_addto_chunk(*errp, len, param.v); } /* Stop processing this chunk. */ return 0; } static int sctp_verify_ext_param(struct net *net, union sctp_params param) { __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int have_auth = 0; int have_asconf = 0; int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_AUTH: have_auth = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: have_asconf = 1; break; } } /* ADD-IP Security: The draft requires us to ABORT or ignore the * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this * only if ADD-IP is turned on and we are not backward-compatible * mode. 
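 * Returns 1 if the advertised extension list is acceptable, 0 if the
 * INIT/INIT-ACK must be rejected.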
*/ if (net->sctp.addip_noauth) return 1; if (net->sctp.addip_enable && !have_auth && have_asconf) return 0; return 1; } static void sctp_process_ext_param(struct sctp_association *asoc, union sctp_params param) { struct net *net = sock_net(asoc->base.sk); __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); int i; for (i = 0; i < num_ext; i++) { switch (param.ext->chunks[i]) { case SCTP_CID_FWD_TSN: if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable) asoc->peer.prsctp_capable = 1; break; case SCTP_CID_AUTH: /* if the peer reports AUTH, assume that it * supports AUTH. */ if (asoc->ep->auth_enable) asoc->peer.auth_capable = 1; break; case SCTP_CID_ASCONF: case SCTP_CID_ASCONF_ACK: if (net->sctp.addip_enable) asoc->peer.asconf_capable = 1; break; default: break; } } } /* RFC 2960 3.2.1 & the Implementers Guide 2.2. * * The Parameter Types are encoded such that the * highest-order two bits specify the action that must be * taken if the processing endpoint does not recognize the * Parameter Type. * * 00 - Stop processing this parameter; do not process any further * parameters within this chunk * * 01 - Stop processing this parameter, do not process any further * parameters within this chunk, and report the unrecognized * parameter in an 'Unrecognized Parameter' ERROR chunk. * * 10 - Skip this parameter and continue processing. * * 11 - Skip this parameter and continue processing but * report the unrecognized parameter in an * 'Unrecognized Parameter' ERROR chunk. * * Return value: * SCTP_IERROR_NO_ERROR - continue with the chunk * SCTP_IERROR_ERROR - stop and report an error. * SCTP_IERROR_NOMEM - out of memory. */ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, union sctp_params param, struct sctp_chunk *chunk, struct sctp_chunk **errp) { int retval = SCTP_IERROR_NO_ERROR; switch (param.p->type & SCTP_PARAM_ACTION_MASK) { case SCTP_PARAM_ACTION_DISCARD: retval = SCTP_IERROR_ERROR; break; case SCTP_PARAM_ACTION_SKIP: break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; /* Fall through */ case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. */ if (NULL == *errp) *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, WORD_ROUND(ntohs(param.p->length)))) sctp_addto_chunk_fixed(*errp, WORD_ROUND(ntohs(param.p->length)), param.v); } else { /* If there is no memory for generating the ERROR * report as specified, an ABORT will be triggered * to the peer and the association won't be * established. */ retval = SCTP_IERROR_NOMEM; } break; default: break; } return retval; } /* Verify variable length parameters * Return values: * SCTP_IERROR_ABORT - trigger an ABORT * SCTP_IERROR_NOMEM - out of memory (abort) * SCTP_IERROR_ERROR - stop processing, trigger an ERROR * SCTP_IERROR_NO_ERROR - continue with the chunk */ static sctp_ierror_t sctp_verify_param(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, union sctp_params param, sctp_cid_t cid, struct sctp_chunk *chunk, struct sctp_chunk **err_chunk) { struct sctp_hmac_algo_param *hmacs; int retval = SCTP_IERROR_NO_ERROR; __u16 n_elt, id = 0; int i; /* FIXME - This routine is not looking at each parameter per the * chunk type, i.e., unrecognized parameters should be further * identified based on the chunk id.
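 * Parameters without an explicit case below are handed to
 * sctp_process_unk_param(), which acts on the two high-order 'action'
 * bits of the parameter type.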
*/ switch (param.p->type) { case SCTP_PARAM_IPV4_ADDRESS: case SCTP_PARAM_IPV6_ADDRESS: case SCTP_PARAM_COOKIE_PRESERVATIVE: case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: case SCTP_PARAM_STATE_COOKIE: case SCTP_PARAM_HEARTBEAT_INFO: case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: case SCTP_PARAM_ECN_CAPABLE: case SCTP_PARAM_ADAPTATION_LAYER_IND: break; case SCTP_PARAM_SUPPORTED_EXT: if (!sctp_verify_ext_param(net, param)) return SCTP_IERROR_ABORT; break; case SCTP_PARAM_SET_PRIMARY: if (net->sctp.addip_enable) break; goto fallthrough; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ sctp_process_hn_param(asoc, param, chunk, err_chunk); retval = SCTP_IERROR_ABORT; break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) break; goto fallthrough; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Secion 6.1 * If the random number is not 32 byte long the association * MUST be aborted. The ABORT chunk SHOULD contain the error * cause 'Protocol Violation'. */ if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fallthrough; /* SCTP-AUTH: Section 3.2 * The CHUNKS parameter MUST be included once in the INIT or * INIT-ACK chunk if the sender wants to receive authenticated * chunks. Its maximum length is 260 bytes. */ if (260 < ntohs(param.p->length)) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fallthrough; hmacs = (struct sctp_hmac_algo_param *)param.p; n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1; /* SCTP-AUTH: Section 6.1 * The HMAC algorithm based on SHA-1 MUST be supported and * included in the HMAC-ALGO parameter. */ for (i = 0; i < n_elt; i++) { id = ntohs(hmacs->hmac_ids[i]); if (id == SCTP_AUTH_HMAC_ID_SHA1) break; } if (id != SCTP_AUTH_HMAC_ID_SHA1) { sctp_process_inv_paramlength(asoc, param.p, chunk, err_chunk); retval = SCTP_IERROR_ABORT; } break; fallthrough: default: pr_debug("%s: unrecognized param:%d for chunk:%d\n", __func__, ntohs(param.p->type), cid); retval = sctp_process_unk_param(asoc, param, chunk, err_chunk); break; } return retval; } /* Verify the INIT packet before we process it. */ int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, sctp_cid_t cid, sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, struct sctp_chunk **errp) { union sctp_params param; bool has_cookie = false; int result; /* Check for missing mandatory parameters. Note: Initial TSN is * also mandatory, but is not checked here since the valid range * is 0..2**32-1. RFC4960, section 3.3.3. */ if (peer_init->init_hdr.num_outbound_streams == 0 || peer_init->init_hdr.num_inbound_streams == 0 || peer_init->init_hdr.init_tag == 0 || ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW) return sctp_process_inv_mandatory(asoc, chunk, errp); sctp_walk_params(param, peer_init, init_hdr.params) { if (param.p->type == SCTP_PARAM_STATE_COOKIE) has_cookie = true; } /* There is a possibility that a parameter length was bad and * in that case we would have stoped walking the parameters. * The current param.p would point at the bad one. * Current consensus on the mailing list is to generate a PROTOCOL * VIOLATION error. 
We build the ERROR chunk here and let the normal * error handling code build and send the packet. */ if (param.v != (void *)chunk->chunk_end) return sctp_process_inv_paramlength(asoc, param.p, chunk, errp); /* The only missing mandatory param possible today is * the state cookie for an INIT-ACK chunk. */ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE, chunk, errp); /* Verify all the variable length parameters */ sctp_walk_params(param, peer_init, init_hdr.params) { result = sctp_verify_param(net, ep, asoc, param, cid, chunk, errp); switch (result) { case SCTP_IERROR_ABORT: case SCTP_IERROR_NOMEM: return 0; case SCTP_IERROR_ERROR: return 1; case SCTP_IERROR_NO_ERROR: default: break; } } /* for (loop through all parameters) */ return 1; } /* Unpack the parameters in an INIT packet into an association. * Returns 0 on failure, else success. * FIXME: This is an association method. */ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, const union sctp_addr *peer_addr, sctp_init_chunk_t *peer_init, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; struct sctp_af *af; union sctp_addr addr; char *cookie; int src_match = 0; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. * When processing a COOKIE ECHO, we retrieve the from address * of the INIT from the cookie. */ /* This implementation defaults to making the first transport * added as the primary transport. The source address seems to * be a a better choice than any of the embedded addresses. */ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr)) src_match = 1; /* Process the initialization parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, chunk->sctp_hdr->source, 0); if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } /* source address of chunk may not match any valid address */ if (!src_match) goto clean_up; /* AUTH: After processing the parameters, make sure that we * have all the required info to potentially do authentications. */ if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; /* In a non-backward compatible mode, if the peer claims * support for ADD-IP but not AUTH, the ADD-IP spec states * that we MUST ABORT the association. Section 6. The section * also give us an option to silently ignore the packet, which * is what we'll do here. */ if (!net->sctp.addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } /* The fixed INIT headers are always in network byte * order. 
*/ asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); /* Apply the upper bounds for output streams based on peer's * number of inbound streams. */ if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } /* Copy Initiation tag from INIT to VT_peer in cookie. */ asoc->c.peer_vtag = asoc->peer.i.init_tag; /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; /* Copy cookie in case we need to resend COOKIE-ECHO. */ cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } /* Set up the TSN tracking pieces. */ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, gfp)) goto clean_up; /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * The stream sequence number in all the streams shall start * from 0 when the association is established. Also, when the * stream sequence number reaches the value 65535 the next * stream sequence number shall be set to 0. */ /* Allocate storage for the negotiated streams if it is not a temporary * association. */ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state != SCTP_ACTIVE) sctp_assoc_rm_peer(asoc, transport); } nomem: return 0; } /* Update asoc with the option described in param. * * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT * * asoc is the association to update. * param is the variable length parameter to use for update. * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. * If the current packet is an INIT we want to minimize the amount of * work we do. In particular, we should not build transport * structures for the addresses. 
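 * Returns 1 on success; returns 0 (e.g. on memory allocation failure)
 * to tell sctp_process_init() to give up on this INIT.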
*/ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_addr addr; int i; __u16 sat; int retval = 1; sctp_scope_t scope; time_t stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; struct sctp_endpoint *ep = asoc->ep; /* We maintain all INIT parameters in network byte order all the * time. This allows us to not worry about whether the parameters * came from a fresh INIT, and INIT ACK, or were stored in a cookie. */ switch (param.p->type) { case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 != asoc->base.sk->sk_family) break; goto do_addr_param; case SCTP_PARAM_IPV4_ADDRESS: /* v4 addresses are not allowed on v6-only socket */ if (ipv6_only_sock(asoc->base.sk)) break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; case SCTP_PARAM_COOKIE_PRESERVATIVE: if (!net->sctp.cookie_preserve_enable) break; stale = ntohl(param.life->lifespan_increment); /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: /* Turn off the default values first so we'll know which * ones are really set by the peer. */ asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; /* Assume that peer supports the address family * by which it sends a packet. */ if (peer_addr->sa.sa_family == AF_INET6) asoc->peer.ipv6_address = 1; else if (peer_addr->sa.sa_family == AF_INET) asoc->peer.ipv4_address = 1; /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) sat /= sizeof(__u16); for (i = 0; i < sat; ++i) { switch (param.sat->types[i]) { case SCTP_PARAM_IPV4_ADDRESS: asoc->peer.ipv4_address = 1; break; case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 == asoc->base.sk->sk_family) asoc->peer.ipv6_address = 1; break; case SCTP_PARAM_HOST_NAME_ADDRESS: asoc->peer.hostname_address = 1; break; default: /* Just ignore anything else. */ break; } } break; case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); asoc->peer.cookie = param.cookie->body; break; case SCTP_PARAM_HEARTBEAT_INFO: /* Would be odd to receive, but it causes no problems. */ break; case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: /* Rejected during verify stage. */ break; case SCTP_PARAM_ECN_CAPABLE: asoc->peer.ecn_capable = 1; break; case SCTP_PARAM_ADAPTATION_LAYER_IND: asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); break; case SCTP_PARAM_SET_PRIMARY: if (!net->sctp.addip_enable) goto fall_through; addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* if the address is invalid, we can't process it. * XXX: see spec for what to do. 
*/ if (!af->addr_valid(&addr, NULL, NULL)) break; t = sctp_assoc_lookup_paddr(asoc, &addr); if (!t) break; sctp_assoc_set_primary(asoc, t); break; case SCTP_PARAM_SUPPORTED_EXT: sctp_process_ext_param(asoc, param); break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ goto fall_through; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fall_through; /* Save peer's random parameter */ asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { retval = 0; break; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fall_through; /* Save peer's HMAC list */ asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { retval = 0; break; } /* Set the default HMAC the peer requested*/ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fall_through; asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) retval = 0; break; fall_through: default: /* Any unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be * called prior to this routine. Simply log the error * here. */ pr_debug("%s: ignoring param:%d for association:%p.\n", __func__, ntohs(param.p->type), asoc); break; } return retval; } /* Select a new verification tag. */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep) { /* I believe that this random number generator complies with RFC1750. * A tag of 0 is reserved for special cases (e.g. INIT). */ __u32 x; do { get_random_bytes(&x, sizeof(__u32)); } while (x == 0); return x; } /* Select an initial TSN to send during startup. */ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) { __u32 retval; get_random_bytes(&retval, sizeof(__u32)); return retval; } /* * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Address Parameter and other parameter will not be wrapped in this function */ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; length += addrlen; /* Create the chunk. 
*/ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; asconf.serial = htonl(asoc->addip_serial++); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); retval->param_hdr.v = sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP * 3.2.1 Add IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC001 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 3.2.2 Delete IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC002 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, union sctp_addr *laddr, struct sockaddr *addrs, int addrcnt, __be16 flags) { sctp_addip_param_t param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; void *addr_buf; struct sctp_af *af; int paramlen = sizeof(param); int addr_param_len = 0; int totallen = 0; int i; int del_pickup = 0; /* Get total length of all the address parameters. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); totallen += paramlen; totallen += addr_param_len; addr_buf += af->sockaddr_len; if (asoc->asconf_addr_del_pending && !del_pickup) { /* reuse the parameter length from the same scope one */ totallen += paramlen; totallen += addr_param_len; del_pickup = 1; pr_debug("%s: picked same-scope del_pending addr, " "totallen for all addresses is %d\n", __func__, totallen); } } /* Create an asconf chunk with the required length. */ retval = sctp_make_asconf(asoc, laddr, totallen); if (!retval) return NULL; /* Add the address parameters to the asconf chunk. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = flags; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); addr_buf += af->sockaddr_len; } if (flags == SCTP_PARAM_ADD_IP && del_pickup) { addr = asoc->asconf_addr_del_pending; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = SCTP_PARAM_DEL_IP; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); } return retval; } /* ADDIP * 3.2.4 Set Primary IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type =0xC004 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF chunk with Set Primary IP address parameter. */ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { sctp_addip_param_t param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; len += addrlen; /* Create the chunk and make asconf header. */ retval = sctp_make_asconf(asoc, addr, len); if (!retval) return NULL; param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; param.param_hdr.length = htons(len); param.crr_id = 0; sctp_addto_chunk(retval, sizeof(param), &param); sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0x80 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF_ACK chunk with enough space for the parameter responses. */ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); if (!retval) return NULL; asconf.serial = htonl(serial); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); return retval; } /* Add response parameters to an ASCONF_ACK chunk. 
*/ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, __be16 err_code, sctp_addip_param_t *asconf_param) { sctp_addip_param_t ack_param; sctp_errhdr_t err_param; int asconf_param_len = 0; int err_param_len = 0; __be16 response_type; if (SCTP_ERROR_NO_ERROR == err_code) { response_type = SCTP_PARAM_SUCCESS_REPORT; } else { response_type = SCTP_PARAM_ERR_CAUSE; err_param_len = sizeof(err_param); if (asconf_param) asconf_param_len = ntohs(asconf_param->param_hdr.length); } /* Add Success Indication or Error Cause Indication parameter. */ ack_param.param_hdr.type = response_type; ack_param.param_hdr.length = htons(sizeof(ack_param) + err_param_len + asconf_param_len); ack_param.crr_id = crr_id; sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); if (SCTP_ERROR_NO_ERROR == err_code) return; /* Add Error Cause parameter. */ err_param.cause = err_code; err_param.length = htons(err_param_len + asconf_param_len); sctp_addto_chunk(chunk, err_param_len, &err_param); /* Add the failed TLV copied from ASCONF chunk. */ if (asconf_param) sctp_addto_chunk(chunk, asconf_param_len, asconf_param); } /* Process a asconf parameter. */ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, sctp_addip_param_t *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) return SCTP_ERROR_UNKNOWN_PARAM; switch (addr_param->p.type) { case SCTP_PARAM_IPV6_ADDRESS: if (!asoc->peer.ipv6_address) return SCTP_ERROR_DNS_FAILED; break; case SCTP_PARAM_IPV4_ADDRESS: if (!asoc->peer.ipv4_address) return SCTP_ERROR_DNS_FAILED; break; default: return SCTP_ERROR_DNS_FAILED; } af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. * (note: wildcard is permitted and requires special handling so * make sure we check for that) */ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) return SCTP_ERROR_DNS_FAILED; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* Section 4.2.1: * If the address 0.0.0.0 or ::0 is provided, the source * address of the packet MUST be added. */ if (af->is_any(&addr)) memcpy(&addr, &asconf->source, sizeof(addr)); /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address * request and does not have the local resources to add this * new address to the association, it MUST return an Error * Cause TLV set to the new error code 'Operation Refused * Due to Resource Shortage'. */ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; /* Start the heartbeat timer. */ if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) sctp_transport_hold(peer); asoc->new_transport = peer; break; case SCTP_PARAM_DEL_IP: /* ADDIP 4.3 D7) If a request is received to delete the * last remaining IP address of a peer endpoint, the receiver * MUST send an Error Cause TLV with the error cause set to the * new error code 'Request to Delete Last Remaining IP Address'. 
*/ if (asoc->peer.transport_count == 1) return SCTP_ERROR_DEL_LAST_IP; /* ADDIP 4.3 D8) If a request is received to delete an IP * address which is also the source address of the IP packet * which contained the ASCONF chunk, the receiver MUST reject * this request. To reject the request the receiver MUST send * an Error Cause TLV set to the new error code 'Request to * Delete Source IP Address' */ if (sctp_cmp_addr_exact(&asconf->source, &addr)) return SCTP_ERROR_DEL_SRC_IP; /* Section 4.2.2 * If the address 0.0.0.0 or ::0 is provided, all * addresses of the peer except the source address of the * packet MUST be deleted. */ if (af->is_any(&addr)) { sctp_assoc_set_primary(asoc, asconf->transport); sctp_assoc_del_nonprimary_peers(asoc, asconf->transport); } else sctp_assoc_del_peer(asoc, &addr); break; case SCTP_PARAM_SET_PRIMARY: /* ADDIP Section 4.2.4 * If the address 0.0.0.0 or ::0 is provided, the receiver * MAY mark the source address of the packet as its * primary. */ if (af->is_any(&addr)) memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) return SCTP_ERROR_DNS_FAILED; sctp_assoc_set_primary(asoc, peer); break; } return SCTP_ERROR_NO_ERROR; } /* Verify the ASCONF packet before we process it. */ int sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_paramhdr *param_hdr, void *chunk_end, struct sctp_paramhdr **errp) { sctp_addip_param_t *asconf_param; union sctp_params param; int length, plen; param.v = (sctp_paramhdr_t *) param_hdr; while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { length = ntohs(param.p->length); *errp = param.p; if (param.v > chunk_end - length || length < sizeof(sctp_paramhdr_t)) return 0; switch (param.p->type) { case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: asconf_param = (sctp_addip_param_t *)param.v; plen = ntohs(asconf_param->param_hdr.length); if (plen < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return 0; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return 0; break; default: break; } param.v += WORD_ROUND(length); } if (param.v != chunk_end) return 0; return 1; } /* Process an incoming ASCONF chunk with the next expected serial no. and * return an ASCONF_ACK chunk to be sent in response. */ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; struct sctp_chunk *asconf_ack; __be16 err_code; int length = 0; int chunk_len; __u32 serial; int all_param_pass = 1; chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); hdr = (sctp_addiphdr_t *)asconf->skb->data; serial = ntohl(hdr->serial); /* Skip the addiphdr and store a pointer to address parameter. */ length = sizeof(sctp_addiphdr_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); chunk_len -= length; /* Skip the address parameter and store a pointer to the first * asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; chunk_len -= length; /* create an ASCONF_ACK chunk. * Based on the definitions of parameters, we know that the size of * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF * parameters. */ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4); if (!asconf_ack) goto done; /* Process the TLVs contained within the ASCONF chunk. 
*/ while (chunk_len > 0) { err_code = sctp_process_asconf_param(asoc, asconf, asconf_param); /* ADDIP 4.1 A7) * If an error response is received for a TLV parameter, * all TLVs with no response before the failed TLV are * considered successful if not reported. All TLVs after * the failed response are considered unsuccessful unless * a specific success indication is present for the parameter. */ if (SCTP_ERROR_NO_ERROR != err_code) all_param_pass = 0; if (!all_param_pass) sctp_add_asconf_response(asconf_ack, asconf_param->crr_id, err_code, asconf_param); /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add * an IP address sends an 'Out of Resource' in its response, it * MUST also fail any subsequent add or delete requests bundled * in the ASCONF. */ if (SCTP_ERROR_RSRC_LOW == err_code) goto done; /* Move to the next ASCONF param. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; chunk_len -= length; } done: asoc->peer.addip_serial++; /* If we are sending a new ASCONF_ACK hold a reference to it in assoc * after freeing the reference to old asconf ack if any. */ if (asconf_ack) { sctp_chunk_hold(asconf_ack); list_add_tail(&asconf_ack->transmitted_list, &asoc->asconf_ack_list); } return asconf_ack; } /* Process a asconf parameter that is successfully acked. */ static void sctp_asconf_param_success(struct sctp_association *asoc, sctp_addip_param_t *asconf_param) { struct sctp_af *af; union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* This is always done in BH context with a socket lock * held, so the list can not change. */ local_bh_disable(); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, &addr)) saddr->state = SCTP_ADDR_SRC; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; case SCTP_PARAM_DEL_IP: local_bh_disable(); sctp_del_bind_addr(bp, &addr); if (asoc->asconf_addr_del_pending != NULL && sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { kfree(asoc->asconf_addr_del_pending); asoc->asconf_addr_del_pending = NULL; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; default: break; } } /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk * for the given asconf parameter. If there is no response for this parameter, * return the error code based on the third argument 'no_err'. * ADDIP 4.1 * A7) If an error response is received for a TLV parameter, all TLVs with no * response before the failed TLV are considered successful if not reported. * All TLVs after the failed response are considered unsuccessful unless a * specific success indication is present for the parameter. 
*/ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, sctp_addip_param_t *asconf_param, int no_err) { sctp_addip_param_t *asconf_ack_param; sctp_errhdr_t *err_param; int length; int asconf_ack_len; __be16 err_code; if (no_err) err_code = SCTP_ERROR_NO_ERROR; else err_code = SCTP_ERROR_REQ_REFUSED; asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); /* Skip the addiphdr from the asconf_ack chunk and store a pointer to * the first asconf_ack parameter. */ length = sizeof(sctp_addiphdr_t); asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + length); asconf_ack_len -= length; while (asconf_ack_len > 0) { if (asconf_ack_param->crr_id == asconf_param->crr_id) { switch (asconf_ack_param->param_hdr.type) { case SCTP_PARAM_SUCCESS_REPORT: return SCTP_ERROR_NO_ERROR; case SCTP_PARAM_ERR_CAUSE: length = sizeof(sctp_addip_param_t); err_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; if (asconf_ack_len > 0) return err_param->cause; else return SCTP_ERROR_INV_PARAM; break; default: return SCTP_ERROR_INV_PARAM; } } length = ntohs(asconf_ack_param->param_hdr.length); asconf_ack_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; } return err_code; } /* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; int no_err = 1; int retval = 0; __be16 err_code = SCTP_ERROR_NO_ERROR; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. */ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_UNKNOWN_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; asconf_len -= length; } if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; sctp_transport_immediate_rtx(asoc->peer.primary_path); } /* Free the cached last sent asconf chunk. 
*/ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; return retval; } /* Make a FWD TSN chunk. */ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist) { struct sctp_chunk *retval = NULL; struct sctp_fwdtsn_hdr ftsn_hdr; struct sctp_fwdtsn_skip skip; size_t hint; int i; hint = (nstreams + 1) * sizeof(__u32); retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); for (i = 0; i < nstreams; i++) { skip.stream = skiplist[i].stream; skip.ssn = skiplist[i].ssn; sctp_addto_chunk(retval, sizeof(skip), &skip); } return retval; }
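/*
 * Editor's note -- not part of the original sm_make_chunk.c above.  The
 * ASCONF handling in this file (sctp_verify_asconf, sctp_process_asconf,
 * sctp_process_asconf_ack) repeatedly walks a chain of TLV parameters,
 * bounding each step by the parameter's declared length and by the end of
 * the chunk before advancing by the padded length.  The helper below is a
 * minimal, self-contained sketch of that walking pattern using plain C
 * types; the names tlv_hdr and tlv_walk_ok are hypothetical and do not
 * exist in the kernel, and lengths are kept in host byte order here
 * instead of the network order the real code converts with ntohs().
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;	/* parameter type; ignored by this sketch */
	uint16_t len;	/* total TLV length in bytes, header included */
};

/* Return 1 if every TLV in buf[0..buflen) is well formed, else 0. */
static int tlv_walk_ok(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= buflen) {
		struct tlv_hdr hdr;
		size_t padded;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.len < sizeof(struct tlv_hdr))
			return 0;	/* shorter than its own header */
		if (hdr.len > buflen - off)
			return 0;	/* declared length runs past the chunk end */
		padded = ((size_t)hdr.len + 3) & ~(size_t)3; /* 4-byte padding, like WORD_ROUND */
		off += padded;
	}
	return off == buflen;	/* no trailing partial header or padding overrun */
}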
./CrossVul/dataset_final_sorted/CWE-399/c/bad_2166_1
crossvul-cpp_data_bad_3463_0
/* * Monkey's Audio APE demuxer * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org> * based upon libdemac from Dave Chapman. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include "libavutil/intreadwrite.h" #include "avformat.h" #include "apetag.h" #define ENABLE_DEBUG 0 /* The earliest and latest file formats supported by this library */ #define APE_MIN_VERSION 3950 #define APE_MAX_VERSION 3990 #define MAC_FORMAT_FLAG_8_BIT 1 // is 8-bit [OBSOLETE] #define MAC_FORMAT_FLAG_CRC 2 // uses the new CRC32 error detection [OBSOLETE] #define MAC_FORMAT_FLAG_HAS_PEAK_LEVEL 4 // uint32 nPeakLevel after the header [OBSOLETE] #define MAC_FORMAT_FLAG_24_BIT 8 // is 24-bit [OBSOLETE] #define MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS 16 // has the number of seek elements after the peak level #define MAC_FORMAT_FLAG_CREATE_WAV_HEADER 32 // create the wave header on decompression (not stored) #define MAC_SUBFRAME_SIZE 4608 #define APE_EXTRADATA_SIZE 6 typedef struct { int64_t pos; int nblocks; int size; int skip; int64_t pts; } APEFrame; typedef struct { /* Derived fields */ uint32_t junklength; uint32_t firstframe; uint32_t totalsamples; int currentframe; APEFrame *frames; /* Info from Descriptor Block */ char magic[4]; int16_t fileversion; int16_t padding1; uint32_t descriptorlength; uint32_t headerlength; uint32_t seektablelength; uint32_t wavheaderlength; uint32_t audiodatalength; uint32_t audiodatalength_high; uint32_t wavtaillength; uint8_t md5[16]; /* Info from Header Block */ uint16_t compressiontype; uint16_t formatflags; uint32_t blocksperframe; uint32_t finalframeblocks; uint32_t totalframes; uint16_t bps; uint16_t channels; uint32_t samplerate; /* Seektable */ uint32_t *seektable; } APEContext; static int ape_probe(AVProbeData * p) { if (p->buf[0] == 'M' && p->buf[1] == 'A' && p->buf[2] == 'C' && p->buf[3] == ' ') return AVPROBE_SCORE_MAX; return 0; } static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx) { #if ENABLE_DEBUG int i; av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n"); av_log(s, AV_LOG_DEBUG, "magic = \"%c%c%c%c\"\n", ape_ctx->magic[0], ape_ctx->magic[1], ape_ctx->magic[2], ape_ctx->magic[3]); av_log(s, AV_LOG_DEBUG, "fileversion = %d\n", ape_ctx->fileversion); av_log(s, AV_LOG_DEBUG, "descriptorlength = %d\n", ape_ctx->descriptorlength); av_log(s, AV_LOG_DEBUG, "headerlength = %d\n", ape_ctx->headerlength); av_log(s, AV_LOG_DEBUG, "seektablelength = %d\n", ape_ctx->seektablelength); av_log(s, AV_LOG_DEBUG, "wavheaderlength = %d\n", ape_ctx->wavheaderlength); av_log(s, AV_LOG_DEBUG, "audiodatalength = %d\n", ape_ctx->audiodatalength); av_log(s, AV_LOG_DEBUG, "audiodatalength_high = %d\n", ape_ctx->audiodatalength_high); av_log(s, AV_LOG_DEBUG, "wavtaillength = %d\n", ape_ctx->wavtaillength); av_log(s, AV_LOG_DEBUG, "md5 = "); for (i = 0; i < 16; i++) av_log(s, 
AV_LOG_DEBUG, "%02x", ape_ctx->md5[i]); av_log(s, AV_LOG_DEBUG, "\n"); av_log(s, AV_LOG_DEBUG, "\nHeader Block:\n\n"); av_log(s, AV_LOG_DEBUG, "compressiontype = %d\n", ape_ctx->compressiontype); av_log(s, AV_LOG_DEBUG, "formatflags = %d\n", ape_ctx->formatflags); av_log(s, AV_LOG_DEBUG, "blocksperframe = %d\n", ape_ctx->blocksperframe); av_log(s, AV_LOG_DEBUG, "finalframeblocks = %d\n", ape_ctx->finalframeblocks); av_log(s, AV_LOG_DEBUG, "totalframes = %d\n", ape_ctx->totalframes); av_log(s, AV_LOG_DEBUG, "bps = %d\n", ape_ctx->bps); av_log(s, AV_LOG_DEBUG, "channels = %d\n", ape_ctx->channels); av_log(s, AV_LOG_DEBUG, "samplerate = %d\n", ape_ctx->samplerate); av_log(s, AV_LOG_DEBUG, "\nSeektable\n\n"); if ((ape_ctx->seektablelength / sizeof(uint32_t)) != ape_ctx->totalframes) { av_log(s, AV_LOG_DEBUG, "No seektable\n"); } else { for (i = 0; i < ape_ctx->seektablelength / sizeof(uint32_t); i++) { if (i < ape_ctx->totalframes - 1) { av_log(s, AV_LOG_DEBUG, "%8d %d (%d bytes)\n", i, ape_ctx->seektable[i], ape_ctx->seektable[i + 1] - ape_ctx->seektable[i]); } else { av_log(s, AV_LOG_DEBUG, "%8d %d\n", i, ape_ctx->seektable[i]); } } } av_log(s, AV_LOG_DEBUG, "\nFrames\n\n"); for (i = 0; i < ape_ctx->totalframes; i++) av_log(s, AV_LOG_DEBUG, "%8d %8lld %8d (%d samples)\n", i, ape_ctx->frames[i].pos, ape_ctx->frames[i].size, ape_ctx->frames[i].nblocks); av_log(s, AV_LOG_DEBUG, "\nCalculated information:\n\n"); av_log(s, AV_LOG_DEBUG, "junklength = %d\n", ape_ctx->junklength); av_log(s, AV_LOG_DEBUG, "firstframe = %d\n", ape_ctx->firstframe); av_log(s, AV_LOG_DEBUG, "totalsamples = %d\n", ape_ctx->totalsamples); #endif } static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap) { AVIOContext *pb = s->pb; APEContext *ape = s->priv_data; AVStream *st; uint32_t tag; int i; int total_blocks; int64_t pts; /* TODO: Skip any leading junk such as id3v2 tags */ ape->junklength = 0; tag = avio_rl32(pb); if (tag != MKTAG('M', 'A', 'C', ' ')) return -1; ape->fileversion = avio_rl16(pb); if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) { av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10); return -1; } if (ape->fileversion >= 3980) { ape->padding1 = avio_rl16(pb); ape->descriptorlength = avio_rl32(pb); ape->headerlength = avio_rl32(pb); ape->seektablelength = avio_rl32(pb); ape->wavheaderlength = avio_rl32(pb); ape->audiodatalength = avio_rl32(pb); ape->audiodatalength_high = avio_rl32(pb); ape->wavtaillength = avio_rl32(pb); avio_read(pb, ape->md5, 16); /* Skip any unknown bytes at the end of the descriptor. 
This is for future compatibility */ if (ape->descriptorlength > 52) avio_seek(pb, ape->descriptorlength - 52, SEEK_CUR); /* Read header data */ ape->compressiontype = avio_rl16(pb); ape->formatflags = avio_rl16(pb); ape->blocksperframe = avio_rl32(pb); ape->finalframeblocks = avio_rl32(pb); ape->totalframes = avio_rl32(pb); ape->bps = avio_rl16(pb); ape->channels = avio_rl16(pb); ape->samplerate = avio_rl32(pb); } else { ape->descriptorlength = 0; ape->headerlength = 32; ape->compressiontype = avio_rl16(pb); ape->formatflags = avio_rl16(pb); ape->channels = avio_rl16(pb); ape->samplerate = avio_rl32(pb); ape->wavheaderlength = avio_rl32(pb); ape->wavtaillength = avio_rl32(pb); ape->totalframes = avio_rl32(pb); ape->finalframeblocks = avio_rl32(pb); if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) { avio_seek(pb, 4, SEEK_CUR); /* Skip the peak level */ ape->headerlength += 4; } if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) { ape->seektablelength = avio_rl32(pb); ape->headerlength += 4; ape->seektablelength *= sizeof(int32_t); } else ape->seektablelength = ape->totalframes * sizeof(int32_t); if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT) ape->bps = 8; else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT) ape->bps = 24; else ape->bps = 16; if (ape->fileversion >= 3950) ape->blocksperframe = 73728 * 4; else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800 && ape->compressiontype >= 4000)) ape->blocksperframe = 73728; else ape->blocksperframe = 9216; /* Skip any stored wav header */ if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER)) avio_seek(pb, ape->wavheaderlength, SEEK_CUR); } if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){ av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes); return -1; } ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame)); if(!ape->frames) return AVERROR(ENOMEM); ape->firstframe = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength; ape->currentframe = 0; ape->totalsamples = ape->finalframeblocks; if (ape->totalframes > 1) ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1); if (ape->seektablelength > 0) { ape->seektable = av_malloc(ape->seektablelength); for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++) ape->seektable[i] = avio_rl32(pb); } ape->frames[0].pos = ape->firstframe; ape->frames[0].nblocks = ape->blocksperframe; ape->frames[0].skip = 0; for (i = 1; i < ape->totalframes; i++) { ape->frames[i].pos = ape->seektable[i]; //ape->frames[i-1].pos + ape->blocksperframe; ape->frames[i].nblocks = ape->blocksperframe; ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos; ape->frames[i].skip = (ape->frames[i].pos - ape->frames[0].pos) & 3; } ape->frames[ape->totalframes - 1].size = ape->finalframeblocks * 4; ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks; for (i = 0; i < ape->totalframes; i++) { if(ape->frames[i].skip){ ape->frames[i].pos -= ape->frames[i].skip; ape->frames[i].size += ape->frames[i].skip; } ape->frames[i].size = (ape->frames[i].size + 3) & ~3; } ape_dumpinfo(s, ape); /* try to read APE tags */ if (!url_is_streamed(pb)) { ff_ape_parse_tag(s); avio_seek(pb, 0, SEEK_SET); } av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10, ape->compressiontype); /* now we are ready: build format streams */ st = av_new_stream(s, 0); if (!st) return -1; total_blocks = (ape->totalframes == 0) ? 
0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_APE; st->codec->codec_tag = MKTAG('A', 'P', 'E', ' '); st->codec->channels = ape->channels; st->codec->sample_rate = ape->samplerate; st->codec->bits_per_coded_sample = ape->bps; st->codec->frame_size = MAC_SUBFRAME_SIZE; st->nb_frames = ape->totalframes; st->start_time = 0; st->duration = total_blocks / MAC_SUBFRAME_SIZE; av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate); st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE); st->codec->extradata_size = APE_EXTRADATA_SIZE; AV_WL16(st->codec->extradata + 0, ape->fileversion); AV_WL16(st->codec->extradata + 2, ape->compressiontype); AV_WL16(st->codec->extradata + 4, ape->formatflags); pts = 0; for (i = 0; i < ape->totalframes; i++) { ape->frames[i].pts = pts; av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME); pts += ape->blocksperframe / MAC_SUBFRAME_SIZE; } return 0; } static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) { int ret; int nblocks; APEContext *ape = s->priv_data; uint32_t extra_size = 8; if (s->pb->eof_reached) return AVERROR(EIO); if (ape->currentframe > ape->totalframes) return AVERROR(EIO); avio_seek (s->pb, ape->frames[ape->currentframe].pos, SEEK_SET); /* Calculate how many blocks there are in this frame */ if (ape->currentframe == (ape->totalframes - 1)) nblocks = ape->finalframeblocks; else nblocks = ape->blocksperframe; if (av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size) < 0) return AVERROR(ENOMEM); AV_WL32(pkt->data , nblocks); AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip); ret = avio_read(s->pb, pkt->data + extra_size, ape->frames[ape->currentframe].size); pkt->pts = ape->frames[ape->currentframe].pts; pkt->stream_index = 0; /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret + extra_size; ape->currentframe++; return 0; } static int ape_read_close(AVFormatContext * s) { APEContext *ape = s->priv_data; av_freep(&ape->frames); av_freep(&ape->seektable); return 0; } static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { AVStream *st = s->streams[stream_index]; APEContext *ape = s->priv_data; int index = av_index_search_timestamp(st, timestamp, flags); if (index < 0) return -1; ape->currentframe = index; return 0; } AVInputFormat ff_ape_demuxer = { "ape", NULL_IF_CONFIG_SMALL("Monkey's Audio"), sizeof(APEContext), ape_probe, ape_read_header, ape_read_packet, ape_read_close, ape_read_seek, .extensions = "ape,apl,mac" };
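/*
 * Editor's note -- not part of the original ape.c above.  ape_read_header
 * allocates a frame table whose size is the product of an attacker-
 * controlled count and sizeof(APEFrame), so it guards the multiplication
 * with a check against UINT_MAX; note that the seektable allocation in the
 * same function is used without a corresponding NULL check in this version.
 * The helper below is a minimal standalone sketch of the general "validate
 * the count before count * size" allocation pattern; checked_calloc_u32 is
 * a hypothetical name and is not part of FFmpeg.
 */
#include <stdlib.h>
#include <stdint.h>

/* Allocate nmemb elements of size bytes, or return NULL if nmemb exceeds a
 * caller-chosen sanity cap or the total size would overflow a size_t. */
static void *checked_calloc_u32(uint32_t nmemb, size_t size, uint32_t cap)
{
	if (nmemb == 0 || nmemb > cap)
		return NULL;
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;	/* nmemb * size would wrap around */
	return calloc(nmemb, size);
}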
./CrossVul/dataset_final_sorted/CWE-399/c/bad_3463_0
crossvul-cpp_data_bad_3575_0
/* * Linux NET3: Internet Group Management Protocol [IGMP] * * This code implements the IGMP protocol as defined in RFC1112. There has * been a further revision of this protocol since which is now supported. * * If you have trouble with this module be careful what gcc you have used, * the older version didn't come out right using gcc 2.5.8, the newer one * seems to fall out with gcc 2.6.2. * * Authors: * Alan Cox <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * * Alan Cox : Added lots of __inline__ to optimise * the memory usage of all the tiny little * functions. * Alan Cox : Dumped the header building experiment. * Alan Cox : Minor tweaks ready for multicast routing * and extended IGMP protocol. * Alan Cox : Removed a load of inline directives. Gcc 2.5.8 * writes utterly bogus code otherwise (sigh) * fixed IGMP loopback to behave in the manner * desired by mrouted, fixed the fact it has been * broken since 1.3.6 and cleaned up a few minor * points. * * Chih-Jen Chang : Tried to revise IGMP to Version 2 * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu * The enhancements are mainly based on Steve Deering's * ipmulti-3.5 source code. * Chih-Jen Chang : Added the igmp_get_mrouter_info and * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of * the mrouted version on that device. * Chih-Jen Chang : Added the max_resp_time parameter to * Tsu-Sheng Tsao igmp_heard_query(). Using this parameter * to identify the multicast router version * and do what the IGMP version 2 specified. * Chih-Jen Chang : Added a timer to revert to IGMP V2 router * Tsu-Sheng Tsao if the specified time expired. * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. * Alan Cox : Use GFP_ATOMIC in the right places. * Christian Daudt : igmp timer wasn't set for local group * memberships but was being deleted, * which caused a "del_timer() called * from %p with timer not initialized\n" * message (960131). * Christian Daudt : removed del_timer from * igmp_timer_expire function (960205). * Christian Daudt : igmp_heard_report now only calls * igmp_timer_expire if tm->running is * true (960216). * Malcolm Beattie : ttl comparison wrong in igmp_rcv made * igmp_heard_query never trigger. Expiry * miscalculation fixed in igmp_heard_query * and random() made to return unsigned to * prevent negative expiry times. * Alexey Kuznetsov: Wrong group leaving behaviour, backport * fix from pending 2.1.x patches. * Alan Cox: Forget to enable FDDI support earlier. * Alexey Kuznetsov: Fixed leaving groups on device down. * Alexey Kuznetsov: Accordance to igmp-v2-06 draft. 
* David L Stevens: IGMPv3 support, with help from * Vinay Kulkarni */ #include <linux/module.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/times.h> #include <net/net_namespace.h> #include <net/arp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/route.h> #include <net/sock.h> #include <net/checksum.h> #include <linux/netfilter_ipv4.h> #ifdef CONFIG_IP_MROUTE #include <linux/mroute.h> #endif #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> #endif #define IP_MAX_MEMBERSHIPS 20 #define IP_MAX_MSF 10 #ifdef CONFIG_IP_MULTICAST /* Parameter names and values are taken from igmp-v2-06 draft */ #define IGMP_V1_Router_Present_Timeout (400*HZ) #define IGMP_V2_Router_Present_Timeout (400*HZ) #define IGMP_Unsolicited_Report_Interval (10*HZ) #define IGMP_Query_Response_Interval (10*HZ) #define IGMP_Unsolicited_Report_Count 2 #define IGMP_Initial_Report_Delay (1) /* IGMP_Initial_Report_Delay is not from IGMP specs! * IGMP specs require to report membership immediately after * joining a group, but we delay the first report by a * small interval. It seems more natural and still does not * contradict to specs provided this delay is small enough. */ #define IGMP_V1_SEEN(in_dev) \ (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \ IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \ ((in_dev)->mr_v1_seen && \ time_before(jiffies, (in_dev)->mr_v1_seen))) #define IGMP_V2_SEEN(in_dev) \ (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \ IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \ ((in_dev)->mr_v2_seen && \ time_before(jiffies, (in_dev)->mr_v2_seen))) static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); static void igmpv3_clear_delrec(struct in_device *in_dev); static int sf_setstate(struct ip_mc_list *pmc); static void sf_markstate(struct ip_mc_list *pmc); #endif static void ip_mc_clear_src(struct ip_mc_list *pmc); static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, int sfcount, __be32 *psfsrc, int delta); static void ip_ma_put(struct ip_mc_list *im) { if (atomic_dec_and_test(&im->refcnt)) { in_dev_put(im->interface); kfree_rcu(im, rcu); } } #define for_each_pmc_rcu(in_dev, pmc) \ for (pmc = rcu_dereference(in_dev->mc_list); \ pmc != NULL; \ pmc = rcu_dereference(pmc->next_rcu)) #define for_each_pmc_rtnl(in_dev, pmc) \ for (pmc = rtnl_dereference(in_dev->mc_list); \ pmc != NULL; \ pmc = rtnl_dereference(pmc->next_rcu)) #ifdef CONFIG_IP_MULTICAST /* * Timer management */ static void igmp_stop_timer(struct ip_mc_list *im) { spin_lock_bh(&im->lock); if (del_timer(&im->timer)) atomic_dec(&im->refcnt); im->tm_running = 0; im->reporter = 0; im->unsolicit_count = 0; spin_unlock_bh(&im->lock); } /* It must be called with locked im->lock */ static void igmp_start_timer(struct ip_mc_list *im, int max_delay) { int tv = net_random() % max_delay; im->tm_running = 1; if (!mod_timer(&im->timer, jiffies+tv+2)) atomic_inc(&im->refcnt); } static void igmp_gq_start_timer(struct in_device *in_dev) { int tv = 
net_random() % in_dev->mr_maxdelay; in_dev->mr_gq_running = 1; if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2)) in_dev_hold(in_dev); } static void igmp_ifc_start_timer(struct in_device *in_dev, int delay) { int tv = net_random() % delay; if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2)) in_dev_hold(in_dev); } static void igmp_mod_timer(struct ip_mc_list *im, int max_delay) { spin_lock_bh(&im->lock); im->unsolicit_count = 0; if (del_timer(&im->timer)) { if ((long)(im->timer.expires-jiffies) < max_delay) { add_timer(&im->timer); im->tm_running = 1; spin_unlock_bh(&im->lock); return; } atomic_dec(&im->refcnt); } igmp_start_timer(im, max_delay); spin_unlock_bh(&im->lock); } /* * Send an IGMP report. */ #define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4) static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type, int gdeleted, int sdeleted) { switch (type) { case IGMPV3_MODE_IS_INCLUDE: case IGMPV3_MODE_IS_EXCLUDE: if (gdeleted || sdeleted) return 0; if (!(pmc->gsquery && !psf->sf_gsresp)) { if (pmc->sfmode == MCAST_INCLUDE) return 1; /* don't include if this source is excluded * in all filters */ if (psf->sf_count[MCAST_INCLUDE]) return type == IGMPV3_MODE_IS_INCLUDE; return pmc->sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; } return 0; case IGMPV3_CHANGE_TO_INCLUDE: if (gdeleted || sdeleted) return 0; return psf->sf_count[MCAST_INCLUDE] != 0; case IGMPV3_CHANGE_TO_EXCLUDE: if (gdeleted || sdeleted) return 0; if (pmc->sfcount[MCAST_EXCLUDE] == 0 || psf->sf_count[MCAST_INCLUDE]) return 0; return pmc->sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; case IGMPV3_ALLOW_NEW_SOURCES: if (gdeleted || !psf->sf_crcount) return 0; return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted; case IGMPV3_BLOCK_OLD_SOURCES: if (pmc->sfmode == MCAST_INCLUDE) return gdeleted || (psf->sf_crcount && sdeleted); return psf->sf_crcount && !gdeleted && !sdeleted; } return 0; } static int igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) { struct ip_sf_list *psf; int scount = 0; for (psf=pmc->sources; psf; psf=psf->sf_next) { if (!is_in(pmc, psf, type, gdeleted, sdeleted)) continue; scount++; } return scount; } #define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) { struct sk_buff *skb; struct rtable *rt; struct iphdr *pip; struct igmpv3_report *pig; struct net *net = dev_net(dev); struct flowi4 fl4; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; while (1) { skb = alloc_skb(size + hlen + tlen, GFP_ATOMIC | __GFP_NOWARN); if (skb) break; size >>= 1; if (size < 256) return NULL; } igmp_skb_size(skb) = size; rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 0, 0, IPPROTO_IGMP, 0, dev->ifindex); if (IS_ERR(rt)) { kfree_skb(skb); return NULL; } skb_dst_set(skb, &rt->dst); skb->dev = dev; skb_reserve(skb, hlen); skb_reset_network_header(skb); pip = ip_hdr(skb); skb_put(skb, sizeof(struct iphdr) + 4); pip->version = 4; pip->ihl = (sizeof(struct iphdr)+4)>>2; pip->tos = 0xc0; pip->frag_off = htons(IP_DF); pip->ttl = 1; pip->daddr = fl4.daddr; pip->saddr = fl4.saddr; pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ ip_select_ident(pip, &rt->dst, NULL); ((u8*)&pip[1])[0] = IPOPT_RA; ((u8*)&pip[1])[1] = 4; ((u8*)&pip[1])[2] = 0; ((u8*)&pip[1])[3] = 0; skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; skb_put(skb, sizeof(*pig)); pig = igmpv3_report_hdr(skb); pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT; pig->resv1 = 
0; pig->csum = 0; pig->resv2 = 0; pig->ngrec = 0; return skb; } static int igmpv3_sendpack(struct sk_buff *skb) { struct igmphdr *pig = igmp_hdr(skb); const int igmplen = skb->tail - skb->transport_header; pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen); return ip_local_out(skb); } static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) { return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel); } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, int type, struct igmpv3_grec **ppgr) { struct net_device *dev = pmc->interface->dev; struct igmpv3_report *pih; struct igmpv3_grec *pgr; if (!skb) skb = igmpv3_newpack(dev, dev->mtu); if (!skb) return NULL; pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec)); pgr->grec_type = type; pgr->grec_auxwords = 0; pgr->grec_nsrcs = 0; pgr->grec_mca = pmc->multiaddr; pih = igmpv3_report_hdr(skb); pih->ngrec = htons(ntohs(pih->ngrec)+1); *ppgr = pgr; return skb; } #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ skb_tailroom(skb)) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) { struct net_device *dev = pmc->interface->dev; struct igmpv3_report *pih; struct igmpv3_grec *pgr = NULL; struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; int scount, stotal, first, isquery, truncate; if (pmc->multiaddr == IGMP_ALL_HOSTS) return skb; isquery = type == IGMPV3_MODE_IS_INCLUDE || type == IGMPV3_MODE_IS_EXCLUDE; truncate = type == IGMPV3_MODE_IS_EXCLUDE || type == IGMPV3_CHANGE_TO_EXCLUDE; stotal = scount = 0; psf_list = sdeleted ? &pmc->tomb : &pmc->sources; if (!*psf_list) goto empty_source; pih = skb ? igmpv3_report_hdr(skb) : NULL; /* EX and TO_EX get a fresh packet, if needed */ if (truncate) { if (pih && pih->ngrec && AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) igmpv3_sendpack(skb); skb = igmpv3_newpack(dev, dev->mtu); } } first = 1; psf_prev = NULL; for (psf=*psf_list; psf; psf=psf_next) { __be32 *psrc; psf_next = psf->sf_next; if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { psf_prev = psf; continue; } /* clear marks on query responses */ if (isquery) psf->sf_gsresp = 0; if (AVAILABLE(skb) < sizeof(__be32) + first*sizeof(struct igmpv3_grec)) { if (truncate && !first) break; /* truncate these */ if (pgr) pgr->grec_nsrcs = htons(scount); if (skb) igmpv3_sendpack(skb); skb = igmpv3_newpack(dev, dev->mtu); first = 1; scount = 0; } if (first) { skb = add_grhead(skb, pmc, type, &pgr); first = 0; } if (!skb) return NULL; psrc = (__be32 *)skb_put(skb, sizeof(__be32)); *psrc = psf->sf_inaddr; scount++; stotal++; if ((type == IGMPV3_ALLOW_NEW_SOURCES || type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) { psf->sf_crcount--; if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { if (psf_prev) psf_prev->sf_next = psf->sf_next; else *psf_list = psf->sf_next; kfree(psf); continue; } } psf_prev = psf; } empty_source: if (!stotal) { if (type == IGMPV3_ALLOW_NEW_SOURCES || type == IGMPV3_BLOCK_OLD_SOURCES) return skb; if (pmc->crcount || isquery) { /* make sure we have room for group header */ if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) { igmpv3_sendpack(skb); skb = NULL; /* add_grhead will get a new one */ } skb = add_grhead(skb, pmc, type, &pgr); } } if (pgr) pgr->grec_nsrcs = htons(scount); if (isquery) pmc->gsquery = 0; /* clear query state on report */ return skb; } static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list 
*pmc) { struct sk_buff *skb = NULL; int type; if (!pmc) { rcu_read_lock(); for_each_pmc_rcu(in_dev, pmc) { if (pmc->multiaddr == IGMP_ALL_HOSTS) continue; spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) type = IGMPV3_MODE_IS_EXCLUDE; else type = IGMPV3_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->lock); } rcu_read_unlock(); } else { spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) type = IGMPV3_MODE_IS_EXCLUDE; else type = IGMPV3_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->lock); } if (!skb) return 0; return igmpv3_sendpack(skb); } /* * remove zero-count source records from a source filter list */ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf) { struct ip_sf_list *psf_prev, *psf_next, *psf; psf_prev = NULL; for (psf=*ppsf; psf; psf = psf_next) { psf_next = psf->sf_next; if (psf->sf_crcount == 0) { if (psf_prev) psf_prev->sf_next = psf->sf_next; else *ppsf = psf->sf_next; kfree(psf); } else psf_prev = psf; } } static void igmpv3_send_cr(struct in_device *in_dev) { struct ip_mc_list *pmc, *pmc_prev, *pmc_next; struct sk_buff *skb = NULL; int type, dtype; rcu_read_lock(); spin_lock_bh(&in_dev->mc_tomb_lock); /* deleted MCA's */ pmc_prev = NULL; for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) { pmc_next = pmc->next; if (pmc->sfmode == MCAST_INCLUDE) { type = IGMPV3_BLOCK_OLD_SOURCES; dtype = IGMPV3_BLOCK_OLD_SOURCES; skb = add_grec(skb, pmc, type, 1, 0); skb = add_grec(skb, pmc, dtype, 1, 1); } if (pmc->crcount) { if (pmc->sfmode == MCAST_EXCLUDE) { type = IGMPV3_CHANGE_TO_INCLUDE; skb = add_grec(skb, pmc, type, 1, 0); } pmc->crcount--; if (pmc->crcount == 0) { igmpv3_clear_zeros(&pmc->tomb); igmpv3_clear_zeros(&pmc->sources); } } if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) { if (pmc_prev) pmc_prev->next = pmc_next; else in_dev->mc_tomb = pmc_next; in_dev_put(pmc->interface); kfree(pmc); } else pmc_prev = pmc; } spin_unlock_bh(&in_dev->mc_tomb_lock); /* change recs */ for_each_pmc_rcu(in_dev, pmc) { spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) { type = IGMPV3_BLOCK_OLD_SOURCES; dtype = IGMPV3_ALLOW_NEW_SOURCES; } else { type = IGMPV3_ALLOW_NEW_SOURCES; dtype = IGMPV3_BLOCK_OLD_SOURCES; } skb = add_grec(skb, pmc, type, 0, 0); skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */ /* filter mode changes */ if (pmc->crcount) { if (pmc->sfmode == MCAST_EXCLUDE) type = IGMPV3_CHANGE_TO_EXCLUDE; else type = IGMPV3_CHANGE_TO_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); pmc->crcount--; } spin_unlock_bh(&pmc->lock); } rcu_read_unlock(); if (!skb) return; (void) igmpv3_sendpack(skb); } static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, int type) { struct sk_buff *skb; struct iphdr *iph; struct igmphdr *ih; struct rtable *rt; struct net_device *dev = in_dev->dev; struct net *net = dev_net(dev); __be32 group = pmc ? 
pmc->multiaddr : 0; struct flowi4 fl4; __be32 dst; int hlen, tlen; if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) return igmpv3_send_report(in_dev, pmc); else if (type == IGMP_HOST_LEAVE_MESSAGE) dst = IGMP_ALL_ROUTER; else dst = group; rt = ip_route_output_ports(net, &fl4, NULL, dst, 0, 0, 0, IPPROTO_IGMP, 0, dev->ifindex); if (IS_ERR(rt)) return -1; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); if (skb == NULL) { ip_rt_put(rt); return -1; } skb_dst_set(skb, &rt->dst); skb_reserve(skb, hlen); skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, sizeof(struct iphdr) + 4); iph->version = 4; iph->ihl = (sizeof(struct iphdr)+4)>>2; iph->tos = 0xc0; iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->daddr = dst; iph->saddr = fl4.saddr; iph->protocol = IPPROTO_IGMP; ip_select_ident(iph, &rt->dst, NULL); ((u8*)&iph[1])[0] = IPOPT_RA; ((u8*)&iph[1])[1] = 4; ((u8*)&iph[1])[2] = 0; ((u8*)&iph[1])[3] = 0; ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); ih->type = type; ih->code = 0; ih->csum = 0; ih->group = group; ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); return ip_local_out(skb); } static void igmp_gq_timer_expire(unsigned long data) { struct in_device *in_dev = (struct in_device *)data; in_dev->mr_gq_running = 0; igmpv3_send_report(in_dev, NULL); __in_dev_put(in_dev); } static void igmp_ifc_timer_expire(unsigned long data) { struct in_device *in_dev = (struct in_device *)data; igmpv3_send_cr(in_dev); if (in_dev->mr_ifc_count) { in_dev->mr_ifc_count--; igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval); } __in_dev_put(in_dev); } static void igmp_ifc_event(struct in_device *in_dev) { if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) return; in_dev->mr_ifc_count = in_dev->mr_qrv ? 
		in_dev->mr_qrv : IGMP_Unsolicited_Report_Count;
	igmp_ifc_start_timer(in_dev, 1);
}

static void igmp_timer_expire(unsigned long data)
{
	struct ip_mc_list *im = (struct ip_mc_list *)data;
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count) {
		im->unsolicit_count--;
		igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
	}
	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}

/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				continue;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}

static void igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
}

static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;

	if (len == 8) {
		if (ih->code == 0) {
			/* Alas, old v1 router presents here. */

			max_delay = IGMP_Query_Response_Interval;
			in_dev->mr_v1_seen = jiffies +
				IGMP_V1_Router_Present_Timeout;
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				IGMP_V2_Router_Present_Timeout;
		}
		/* cancel the interface change timer */
		in_dev->mr_ifc_count = 0;
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_Query_Response_Interval;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		in_dev->mr_maxdelay = max_delay;
		if (ih3->qrv)
			in_dev->mr_qrv = ih3->qrv;
		if (!group) { /* general query */
			if (ih3->nsrcs)
				return;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
}

/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	int len = skb->len;

	if (in_dev == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto drop;
	}

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
#endif


/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in ndo_set_rx_mode
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc;

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf=pmc->sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf, *psf_next;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);
	if (pmc) {
		for (psf=pmc->tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
}

static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	rcu_read_unlock();
}
#endif

static void igmp_group_dropped(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if
(in_dev->dead) return; if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { spin_lock_bh(&im->lock); igmp_start_timer(im, IGMP_Initial_Report_Delay); spin_unlock_bh(&im->lock); return; } /* else, v3 */ im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : IGMP_Unsolicited_Report_Count; igmp_ifc_event(in_dev); #endif } /* * Multicast list managers */ /* * A socket has joined a multicast group on device dev. */ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) { struct ip_mc_list *im; ASSERT_RTNL(); for_each_pmc_rtnl(in_dev, im) { if (im->multiaddr == addr) { im->users++; ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); goto out; } } im = kzalloc(sizeof(*im), GFP_KERNEL); if (!im) goto out; im->users = 1; im->interface = in_dev; in_dev_hold(in_dev); im->multiaddr = addr; /* initial mode is (EX, empty) */ im->sfmode = MCAST_EXCLUDE; im->sfcount[MCAST_EXCLUDE] = 1; atomic_set(&im->refcnt, 1); spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); im->unsolicit_count = IGMP_Unsolicited_Report_Count; #endif im->next_rcu = in_dev->mc_list; in_dev->mc_count++; RCU_INIT_POINTER(in_dev->mc_list, im); #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, im->multiaddr); #endif igmp_group_added(im); if (!in_dev->dead) ip_rt_multicast_event(in_dev); out: return; } EXPORT_SYMBOL(ip_mc_inc_group); /* * Resend IGMP JOIN report; used for bonding. * Called with rcu_read_lock() */ void ip_mc_rejoin_groups(struct in_device *in_dev) { #ifdef CONFIG_IP_MULTICAST struct ip_mc_list *im; int type; for_each_pmc_rcu(in_dev, im) { if (im->multiaddr == IGMP_ALL_HOSTS) continue; /* a failover is happening and switches * must be notified immediately */ if (IGMP_V1_SEEN(in_dev)) type = IGMP_HOST_MEMBERSHIP_REPORT; else if (IGMP_V2_SEEN(in_dev)) type = IGMPV2_HOST_MEMBERSHIP_REPORT; else type = IGMPV3_HOST_MEMBERSHIP_REPORT; igmp_send_report(in_dev, im, type); } #endif } EXPORT_SYMBOL(ip_mc_rejoin_groups); /* * A socket has left a multicast group on device dev */ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) { struct ip_mc_list *i; struct ip_mc_list __rcu **ip; ASSERT_RTNL(); for (ip = &in_dev->mc_list; (i = rtnl_dereference(*ip)) != NULL; ip = &i->next_rcu) { if (i->multiaddr == addr) { if (--i->users == 0) { *ip = i->next_rcu; in_dev->mc_count--; igmp_group_dropped(i); ip_mc_clear_src(i); if (!in_dev->dead) ip_rt_multicast_event(in_dev); ip_ma_put(i); return; } break; } } } EXPORT_SYMBOL(ip_mc_dec_group); /* Device changing type */ void ip_mc_unmap(struct in_device *in_dev) { struct ip_mc_list *pmc; ASSERT_RTNL(); for_each_pmc_rtnl(in_dev, pmc) igmp_group_dropped(pmc); } void ip_mc_remap(struct in_device *in_dev) { struct ip_mc_list *pmc; ASSERT_RTNL(); for_each_pmc_rtnl(in_dev, pmc) igmp_group_added(pmc); } /* Device going down */ void ip_mc_down(struct in_device *in_dev) { struct ip_mc_list *pmc; ASSERT_RTNL(); for_each_pmc_rtnl(in_dev, pmc) igmp_group_dropped(pmc); #ifdef CONFIG_IP_MULTICAST in_dev->mr_ifc_count = 0; if (del_timer(&in_dev->mr_ifc_timer)) __in_dev_put(in_dev); in_dev->mr_gq_running = 0; if (del_timer(&in_dev->mr_gq_timer)) __in_dev_put(in_dev); igmpv3_clear_delrec(in_dev); #endif ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); } void ip_mc_init_dev(struct in_device *in_dev) { ASSERT_RTNL(); in_dev->mc_tomb = NULL; #ifdef CONFIG_IP_MULTICAST in_dev->mr_gq_running = 0; setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire, (unsigned long)in_dev); in_dev->mr_ifc_count = 0; in_dev->mc_count = 0; 
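	/* mr_ifc_timer retransmits IGMPv3 interface state-change reports;
	 * mr_ifc_count (reset above) is the number of reports still owed
	 * for the most recent filter/source change.
	 */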
setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, (unsigned long)in_dev); in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; #endif spin_lock_init(&in_dev->mc_tomb_lock); } /* Device going up */ void ip_mc_up(struct in_device *in_dev) { struct ip_mc_list *pmc; ASSERT_RTNL(); ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); for_each_pmc_rtnl(in_dev, pmc) igmp_group_added(pmc); } /* * Device is about to be destroyed: clean up. */ void ip_mc_destroy_dev(struct in_device *in_dev) { struct ip_mc_list *i; ASSERT_RTNL(); /* Deactivate timers */ ip_mc_down(in_dev); while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { in_dev->mc_list = i->next_rcu; in_dev->mc_count--; /* We've dropped the groups in ip_mc_down already */ ip_mc_clear_src(i); ip_ma_put(i); } } /* RTNL is locked */ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) { struct net_device *dev = NULL; struct in_device *idev = NULL; if (imr->imr_ifindex) { idev = inetdev_by_index(net, imr->imr_ifindex); return idev; } if (imr->imr_address.s_addr) { dev = __ip_dev_find(net, imr->imr_address.s_addr, false); if (!dev) return NULL; } if (!dev) { struct rtable *rt = ip_route_output(net, imr->imr_multiaddr.s_addr, 0, 0, 0); if (!IS_ERR(rt)) { dev = rt->dst.dev; ip_rt_put(rt); } } if (dev) { imr->imr_ifindex = dev->ifindex; idev = __in_dev_get_rtnl(dev); } return idev; } /* * Join a socket to a group */ int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS; int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF; static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, __be32 *psfsrc) { struct ip_sf_list *psf, *psf_prev; int rv = 0; psf_prev = NULL; for (psf=pmc->sources; psf; psf=psf->sf_next) { if (psf->sf_inaddr == *psfsrc) break; psf_prev = psf; } if (!psf || psf->sf_count[sfmode] == 0) { /* source filter not found, or count wrong => bug */ return -ESRCH; } psf->sf_count[sfmode]--; if (psf->sf_count[sfmode] == 0) { ip_rt_multicast_event(pmc->interface); } if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) { #ifdef CONFIG_IP_MULTICAST struct in_device *in_dev = pmc->interface; #endif /* no more filters for this source */ if (psf_prev) psf_prev->sf_next = psf->sf_next; else pmc->sources = psf->sf_next; #ifdef CONFIG_IP_MULTICAST if (psf->sf_oldin && !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : IGMP_Unsolicited_Report_Count; psf->sf_next = pmc->tomb; pmc->tomb = psf; rv = 1; } else #endif kfree(psf); } return rv; } #ifndef CONFIG_IP_MULTICAST #define igmp_ifc_event(x) do { } while (0) #endif static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, int sfcount, __be32 *psfsrc, int delta) { struct ip_mc_list *pmc; int changerec = 0; int i, err; if (!in_dev) return -ENODEV; rcu_read_lock(); for_each_pmc_rcu(in_dev, pmc) { if (*pmca == pmc->multiaddr) break; } if (!pmc) { /* MCA not found?? 
bug */ rcu_read_unlock(); return -ESRCH; } spin_lock_bh(&pmc->lock); rcu_read_unlock(); #ifdef CONFIG_IP_MULTICAST sf_markstate(pmc); #endif if (!delta) { err = -EINVAL; if (!pmc->sfcount[sfmode]) goto out_unlock; pmc->sfcount[sfmode]--; } err = 0; for (i=0; i<sfcount; i++) { int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]); changerec |= rv > 0; if (!err && rv < 0) err = rv; } if (pmc->sfmode == MCAST_EXCLUDE && pmc->sfcount[MCAST_EXCLUDE] == 0 && pmc->sfcount[MCAST_INCLUDE]) { #ifdef CONFIG_IP_MULTICAST struct ip_sf_list *psf; #endif /* filter mode change */ pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : IGMP_Unsolicited_Report_Count; in_dev->mr_ifc_count = pmc->crcount; for (psf=pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; igmp_ifc_event(pmc->interface); } else if (sf_setstate(pmc) || changerec) { igmp_ifc_event(pmc->interface); #endif } out_unlock: spin_unlock_bh(&pmc->lock); return err; } /* * Add multicast single-source filter to the interface list */ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, __be32 *psfsrc) { struct ip_sf_list *psf, *psf_prev; psf_prev = NULL; for (psf=pmc->sources; psf; psf=psf->sf_next) { if (psf->sf_inaddr == *psfsrc) break; psf_prev = psf; } if (!psf) { psf = kzalloc(sizeof(*psf), GFP_ATOMIC); if (!psf) return -ENOBUFS; psf->sf_inaddr = *psfsrc; if (psf_prev) { psf_prev->sf_next = psf; } else pmc->sources = psf; } psf->sf_count[sfmode]++; if (psf->sf_count[sfmode] == 1) { ip_rt_multicast_event(pmc->interface); } return 0; } #ifdef CONFIG_IP_MULTICAST static void sf_markstate(struct ip_mc_list *pmc) { struct ip_sf_list *psf; int mca_xcount = pmc->sfcount[MCAST_EXCLUDE]; for (psf=pmc->sources; psf; psf=psf->sf_next) if (pmc->sfcount[MCAST_EXCLUDE]) { psf->sf_oldin = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; } else psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0; } static int sf_setstate(struct ip_mc_list *pmc) { struct ip_sf_list *psf, *dpsf; int mca_xcount = pmc->sfcount[MCAST_EXCLUDE]; int qrv = pmc->interface->mr_qrv; int new_in, rv; rv = 0; for (psf=pmc->sources; psf; psf=psf->sf_next) { if (pmc->sfcount[MCAST_EXCLUDE]) { new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; } else new_in = psf->sf_count[MCAST_INCLUDE] != 0; if (new_in) { if (!psf->sf_oldin) { struct ip_sf_list *prev = NULL; for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) { if (dpsf->sf_inaddr == psf->sf_inaddr) break; prev = dpsf; } if (dpsf) { if (prev) prev->sf_next = dpsf->sf_next; else pmc->tomb = dpsf->sf_next; kfree(dpsf); } psf->sf_crcount = qrv; rv++; } } else if (psf->sf_oldin) { psf->sf_crcount = 0; /* * add or update "delete" records if an active filter * is now inactive */ for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) if (dpsf->sf_inaddr == psf->sf_inaddr) break; if (!dpsf) { dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); if (!dpsf) continue; *dpsf = *psf; /* pmc->lock held by callers */ dpsf->sf_next = pmc->tomb; pmc->tomb = dpsf; } dpsf->sf_crcount = qrv; rv++; } } return rv; } #endif /* * Add multicast source filter list to the interface list */ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, int sfcount, __be32 *psfsrc, int delta) { struct ip_mc_list *pmc; int isexclude; int i, err; if (!in_dev) return -ENODEV; rcu_read_lock(); for_each_pmc_rcu(in_dev, pmc) { if (*pmca == pmc->multiaddr) break; } if (!pmc) { /* MCA not found?? 
bug */ rcu_read_unlock(); return -ESRCH; } spin_lock_bh(&pmc->lock); rcu_read_unlock(); #ifdef CONFIG_IP_MULTICAST sf_markstate(pmc); #endif isexclude = pmc->sfmode == MCAST_EXCLUDE; if (!delta) pmc->sfcount[sfmode]++; err = 0; for (i=0; i<sfcount; i++) { err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]); if (err) break; } if (err) { int j; if (!delta) pmc->sfcount[sfmode]--; for (j=0; j<i; j++) (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]); } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) { #ifdef CONFIG_IP_MULTICAST struct ip_sf_list *psf; in_dev = pmc->interface; #endif /* filter mode change */ if (pmc->sfcount[MCAST_EXCLUDE]) pmc->sfmode = MCAST_EXCLUDE; else if (pmc->sfcount[MCAST_INCLUDE]) pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST /* else no filters; keep old mode for reports */ pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : IGMP_Unsolicited_Report_Count; in_dev->mr_ifc_count = pmc->crcount; for (psf=pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; igmp_ifc_event(in_dev); } else if (sf_setstate(pmc)) { igmp_ifc_event(in_dev); #endif } spin_unlock_bh(&pmc->lock); return err; } static void ip_mc_clear_src(struct ip_mc_list *pmc) { struct ip_sf_list *psf, *nextpsf; for (psf=pmc->tomb; psf; psf=nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->tomb = NULL; for (psf=pmc->sources; psf; psf=nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->sources = NULL; pmc->sfmode = MCAST_EXCLUDE; pmc->sfcount[MCAST_INCLUDE] = 0; pmc->sfcount[MCAST_EXCLUDE] = 1; } /* * Join a multicast group */ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) { int err; __be32 addr = imr->imr_multiaddr.s_addr; struct ip_mc_socklist *iml = NULL, *i; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); int ifindex; int count = 0; if (!ipv4_is_multicast(addr)) return -EINVAL; rtnl_lock(); in_dev = ip_mc_find_dev(net, imr); if (!in_dev) { iml = NULL; err = -ENODEV; goto done; } err = -EADDRINUSE; ifindex = imr->imr_ifindex; for_each_pmc_rtnl(inet, i) { if (i->multi.imr_multiaddr.s_addr == addr && i->multi.imr_ifindex == ifindex) goto done; count++; } err = -ENOBUFS; if (count >= sysctl_igmp_max_memberships) goto done; iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); if (iml == NULL) goto done; memcpy(&iml->multi, imr, sizeof(*imr)); iml->next_rcu = inet->mc_list; iml->sflist = NULL; iml->sfmode = MCAST_EXCLUDE; RCU_INIT_POINTER(inet->mc_list, iml); ip_mc_inc_group(in_dev, addr); err = 0; done: rtnl_unlock(); return err; } EXPORT_SYMBOL(ip_mc_join_group); static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, struct in_device *in_dev) { struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist); int err; if (psf == NULL) { /* any-source empty exclude case */ return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, iml->sfmode, 0, NULL, 0); } err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, iml->sfmode, psf->sl_count, psf->sl_addr, 0); RCU_INIT_POINTER(iml->sflist, NULL); /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc); kfree_rcu(psf, rcu); return err; } /* * Ask a socket to leave a group. 
*/ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; struct ip_mc_socklist __rcu **imlp; struct in_device *in_dev; struct net *net = sock_net(sk); __be32 group = imr->imr_multiaddr.s_addr; u32 ifindex; int ret = -EADDRNOTAVAIL; rtnl_lock(); in_dev = ip_mc_find_dev(net, imr); ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = rtnl_dereference(*imlp)) != NULL; imlp = &iml->next_rcu) { if (iml->multi.imr_multiaddr.s_addr != group) continue; if (ifindex) { if (iml->multi.imr_ifindex != ifindex) continue; } else if (imr->imr_address.s_addr && imr->imr_address.s_addr != iml->multi.imr_address.s_addr) continue; (void) ip_mc_leave_src(sk, iml, in_dev); *imlp = iml->next_rcu; if (in_dev) ip_mc_dec_group(in_dev, group); rtnl_unlock(); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); return 0; } if (!in_dev) ret = -ENODEV; rtnl_unlock(); return ret; } int ip_mc_source(int add, int omode, struct sock *sk, struct ip_mreq_source *mreqs, int ifindex) { int err; struct ip_mreqn imr; __be32 addr = mreqs->imr_multiaddr; struct ip_mc_socklist *pmc; struct in_device *in_dev = NULL; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; struct net *net = sock_net(sk); int leavegroup = 0; int i, j, rv; if (!ipv4_is_multicast(addr)) return -EINVAL; rtnl_lock(); imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; imr.imr_address.s_addr = mreqs->imr_interface; imr.imr_ifindex = ifindex; in_dev = ip_mc_find_dev(net, &imr); if (!in_dev) { err = -ENODEV; goto done; } err = -EADDRNOTAVAIL; for_each_pmc_rtnl(inet, pmc) { if ((pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr) && (pmc->multi.imr_ifindex == imr.imr_ifindex)) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } /* if a source filter was set, must be the same mode as before */ if (pmc->sflist) { if (pmc->sfmode != omode) { err = -EINVAL; goto done; } } else if (pmc->sfmode != omode) { /* allow mode switches for empty-set filters */ ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0); ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0, NULL, 0); pmc->sfmode = omode; } psl = rtnl_dereference(pmc->sflist); if (!add) { if (!psl) goto done; /* err = -EADDRNOTAVAIL */ rv = !0; for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr, sizeof(__be32)); if (rv == 0) break; } if (rv) /* source not found */ goto done; /* err = -EADDRNOTAVAIL */ /* special case - (INCLUDE, empty) == LEAVE_GROUP */ if (psl->sl_count == 1 && omode == MCAST_INCLUDE) { leavegroup = 1; goto done; } /* update the interface filter */ ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, &mreqs->imr_sourceaddr, 1); for (j=i+1; j<psl->sl_count; j++) psl->sl_addr[j-1] = psl->sl_addr[j]; psl->sl_count--; err = 0; goto done; } /* else, add a new source to the filter */ if (psl && psl->sl_count >= sysctl_igmp_max_msf) { err = -ENOBUFS; goto done; } if (!psl || psl->sl_count == psl->sl_max) { struct ip_sf_socklist *newpsl; int count = IP_SFBLOCK; if (psl) count += psl->sl_max; newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = count; newpsl->sl_count = count - IP_SFBLOCK; if (psl) { for (i=0; i<psl->sl_count; i++) newpsl->sl_addr[i] = psl->sl_addr[i]; /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); kfree_rcu(psl, rcu); } 
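		/* publish the newly allocated (larger) filter list; the old
		 * list, if any, was copied above and queued for freeing with
		 * kfree_rcu().
		 */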
RCU_INIT_POINTER(pmc->sflist, newpsl); psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr, sizeof(__be32)); if (rv == 0) break; } if (rv == 0) /* address already there is an error */ goto done; for (j=psl->sl_count-1; j>=i; j--) psl->sl_addr[j+1] = psl->sl_addr[j]; psl->sl_addr[i] = mreqs->imr_sourceaddr; psl->sl_count++; err = 0; /* update the interface list */ ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, &mreqs->imr_sourceaddr, 1); done: rtnl_unlock(); if (leavegroup) return ip_mc_leave_group(sk, &imr); return err; } int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) { int err = 0; struct ip_mreqn imr; __be32 addr = msf->imsf_multiaddr; struct ip_mc_socklist *pmc; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *newpsl, *psl; struct net *net = sock_net(sk); int leavegroup = 0; if (!ipv4_is_multicast(addr)) return -EINVAL; if (msf->imsf_fmode != MCAST_INCLUDE && msf->imsf_fmode != MCAST_EXCLUDE) return -EINVAL; rtnl_lock(); imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; imr.imr_address.s_addr = msf->imsf_interface; imr.imr_ifindex = ifindex; in_dev = ip_mc_find_dev(net, &imr); if (!in_dev) { err = -ENODEV; goto done; } /* special case - (INCLUDE, empty) == LEAVE_GROUP */ if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) { leavegroup = 1; goto done; } for_each_pmc_rtnl(inet, pmc) { if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && pmc->multi.imr_ifindex == imr.imr_ifindex) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } if (msf->imsf_numsrc) { newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc; memcpy(newpsl->sl_addr, msf->imsf_slist, msf->imsf_numsrc * sizeof(msf->imsf_slist[0])); err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr, msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0); if (err) { sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max)); goto done; } } else { newpsl = NULL; (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr, msf->imsf_fmode, 0, NULL, 0); } psl = rtnl_dereference(pmc->sflist); if (psl) { (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, psl->sl_count, psl->sl_addr, 0); /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); kfree_rcu(psl, rcu); } else (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 0, NULL, 0); RCU_INIT_POINTER(pmc->sflist, newpsl); pmc->sfmode = msf->imsf_fmode; err = 0; done: rtnl_unlock(); if (leavegroup) err = ip_mc_leave_group(sk, &imr); return err; } int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen) { int err, len, count, copycount; struct ip_mreqn imr; __be32 addr = msf->imsf_multiaddr; struct ip_mc_socklist *pmc; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; struct net *net = sock_net(sk); if (!ipv4_is_multicast(addr)) return -EINVAL; rtnl_lock(); imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; imr.imr_address.s_addr = msf->imsf_interface; imr.imr_ifindex = 0; in_dev = ip_mc_find_dev(net, &imr); if (!in_dev) { err = -ENODEV; goto done; } err = -EADDRNOTAVAIL; for_each_pmc_rtnl(inet, pmc) { if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && pmc->multi.imr_ifindex == imr.imr_ifindex) break; } if 
(!pmc) /* must have a prior join */ goto done; msf->imsf_fmode = pmc->sfmode; psl = rtnl_dereference(pmc->sflist); rtnl_unlock(); if (!psl) { len = 0; count = 0; } else { count = psl->sl_count; } copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc; len = copycount * sizeof(psl->sl_addr[0]); msf->imsf_numsrc = count; if (put_user(IP_MSFILTER_SIZE(copycount), optlen) || copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) { return -EFAULT; } if (len && copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len)) return -EFAULT; return 0; done: rtnl_unlock(); return err; } int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen) { int err, i, count, copycount; struct sockaddr_in *psin; __be32 addr; struct ip_mc_socklist *pmc; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; psin = (struct sockaddr_in *)&gsf->gf_group; if (psin->sin_family != AF_INET) return -EINVAL; addr = psin->sin_addr.s_addr; if (!ipv4_is_multicast(addr)) return -EINVAL; rtnl_lock(); err = -EADDRNOTAVAIL; for_each_pmc_rtnl(inet, pmc) { if (pmc->multi.imr_multiaddr.s_addr == addr && pmc->multi.imr_ifindex == gsf->gf_interface) break; } if (!pmc) /* must have a prior join */ goto done; gsf->gf_fmode = pmc->sfmode; psl = rtnl_dereference(pmc->sflist); rtnl_unlock(); count = psl ? psl->sl_count : 0; copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; gsf->gf_numsrc = count; if (put_user(GROUP_FILTER_SIZE(copycount), optlen) || copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) { return -EFAULT; } for (i=0; i<copycount; i++) { struct sockaddr_storage ss; psin = (struct sockaddr_in *)&ss; memset(&ss, 0, sizeof(ss)); psin->sin_family = AF_INET; psin->sin_addr.s_addr = psl->sl_addr[i]; if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss))) return -EFAULT; } return 0; done: rtnl_unlock(); return err; } /* * check if a multicast source filter allows delivery for a given <src,dst,intf> */ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *pmc; struct ip_sf_socklist *psl; int i; int ret; ret = 1; if (!ipv4_is_multicast(loc_addr)) goto out; rcu_read_lock(); for_each_pmc_rcu(inet, pmc) { if (pmc->multi.imr_multiaddr.s_addr == loc_addr && pmc->multi.imr_ifindex == dif) break; } ret = inet->mc_all; if (!pmc) goto unlock; psl = rcu_dereference(pmc->sflist); ret = (pmc->sfmode == MCAST_EXCLUDE); if (!psl) goto unlock; for (i=0; i<psl->sl_count; i++) { if (psl->sl_addr[i] == rmt_addr) break; } ret = 0; if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) goto unlock; if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) goto unlock; ret = 1; unlock: rcu_read_unlock(); out: return ret; } /* * A socket is closing. 
*/ void ip_mc_drop_socket(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; struct net *net = sock_net(sk); if (inet->mc_list == NULL) return; rtnl_lock(); while ((iml = rtnl_dereference(inet->mc_list)) != NULL) { struct in_device *in_dev; inet->mc_list = iml->next_rcu; in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); (void) ip_mc_leave_src(sk, iml, in_dev); if (in_dev != NULL) ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); } rtnl_unlock(); } /* called with rcu_read_lock() */ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto) { struct ip_mc_list *im; struct ip_sf_list *psf; int rv = 0; for_each_pmc_rcu(in_dev, im) { if (im->multiaddr == mc_addr) break; } if (im && proto == IPPROTO_IGMP) { rv = 1; } else if (im) { if (src_addr) { for (psf=im->sources; psf; psf=psf->sf_next) { if (psf->sf_inaddr == src_addr) break; } if (psf) rv = psf->sf_count[MCAST_INCLUDE] || psf->sf_count[MCAST_EXCLUDE] != im->sfcount[MCAST_EXCLUDE]; else rv = im->sfcount[MCAST_EXCLUDE] != 0; } else rv = 1; /* unspecified source; tentatively allow */ } return rv; } #if defined(CONFIG_PROC_FS) struct igmp_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *in_dev; }; #define igmp_mc_seq_private(seq) ((struct igmp_mc_iter_state *)(seq)->private) static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) { struct net *net = seq_file_net(seq); struct ip_mc_list *im = NULL; struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); state->in_dev = NULL; for_each_netdev_rcu(net, state->dev) { struct in_device *in_dev; in_dev = __in_dev_get_rcu(state->dev); if (!in_dev) continue; im = rcu_dereference(in_dev->mc_list); if (im) { state->in_dev = in_dev; break; } } return im; } static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im) { struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); im = rcu_dereference(im->next_rcu); while (!im) { state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->in_dev = NULL; break; } state->in_dev = __in_dev_get_rcu(state->dev); if (!state->in_dev) continue; im = rcu_dereference(state->in_dev->mc_list); } return im; } static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos) { struct ip_mc_list *im = igmp_mc_get_first(seq); if (im) while (pos && (im = igmp_mc_get_next(seq, im)) != NULL) --pos; return pos ? NULL : im; } static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip_mc_list *im; if (v == SEQ_START_TOKEN) im = igmp_mc_get_first(seq); else im = igmp_mc_get_next(seq, v); ++*pos; return im; } static void igmp_mc_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); state->in_dev = NULL; state->dev = NULL; rcu_read_unlock(); } static int igmp_mc_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n"); else { struct ip_mc_list *im = (struct ip_mc_list *)v; struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); char *querier; #ifdef CONFIG_IP_MULTICAST querier = IGMP_V1_SEEN(state->in_dev) ? 
"V1" : IGMP_V2_SEEN(state->in_dev) ? "V2" : "V3"; #else querier = "NONE"; #endif if (rcu_dereference(state->in_dev->mc_list) == im) { seq_printf(seq, "%d\t%-10s: %5d %7s\n", state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); } seq_printf(seq, "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n", im->multiaddr, im->users, im->tm_running, im->tm_running ? jiffies_to_clock_t(im->timer.expires-jiffies) : 0, im->reporter); } return 0; } static const struct seq_operations igmp_mc_seq_ops = { .start = igmp_mc_seq_start, .next = igmp_mc_seq_next, .stop = igmp_mc_seq_stop, .show = igmp_mc_seq_show, }; static int igmp_mc_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &igmp_mc_seq_ops, sizeof(struct igmp_mc_iter_state)); } static const struct file_operations igmp_mc_seq_fops = { .owner = THIS_MODULE, .open = igmp_mc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct igmp_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct in_device *idev; struct ip_mc_list *im; }; #define igmp_mcf_seq_private(seq) ((struct igmp_mcf_iter_state *)(seq)->private) static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) { struct net *net = seq_file_net(seq); struct ip_sf_list *psf = NULL; struct ip_mc_list *im = NULL; struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); state->idev = NULL; state->im = NULL; for_each_netdev_rcu(net, state->dev) { struct in_device *idev; idev = __in_dev_get_rcu(state->dev); if (unlikely(idev == NULL)) continue; im = rcu_dereference(idev->mc_list); if (likely(im != NULL)) { spin_lock_bh(&im->lock); psf = im->sources; if (likely(psf != NULL)) { state->im = im; state->idev = idev; break; } spin_unlock_bh(&im->lock); } } return psf; } static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf) { struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); psf = psf->sf_next; while (!psf) { spin_unlock_bh(&state->im->lock); state->im = state->im->next; while (!state->im) { state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; goto out; } state->idev = __in_dev_get_rcu(state->dev); if (!state->idev) continue; state->im = rcu_dereference(state->idev->mc_list); } if (!state->im) break; spin_lock_bh(&state->im->lock); psf = state->im->sources; } out: return psf; } static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos) { struct ip_sf_list *psf = igmp_mcf_get_first(seq); if (psf) while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL) --pos; return pos ? NULL : psf; } static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); return *pos ? 
		igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%3s %6s "
			   "%10s %10s %6s %6s\n", "Idx",
			   "Device", "MCA", "SRC", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start = igmp_mcf_seq_start,
	.next = igmp_mcf_seq_next,
	.stop = igmp_mcf_seq_stop,
	.show = igmp_mcf_seq_show,
};

static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mcf_seq_ops,
			    sizeof(struct igmp_mcf_iter_state));
}

static const struct file_operations igmp_mcf_seq_fops = {
	.owner = THIS_MODULE,
	.open = igmp_mcf_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
	if (!pde)
		goto out_igmp;
	pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
	if (!pde)
		goto out_mcfilter;
	return 0;

out_mcfilter:
	proc_net_remove(net, "igmp");
out_igmp:
	return -ENOMEM;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	proc_net_remove(net, "mcfilter");
	proc_net_remove(net, "igmp");
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};

int __init igmp_mc_proc_init(void)
{
	return register_pernet_subsys(&igmp_net_ops);
}
#endif
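/*
 * Illustrative addendum (not part of the original igmp.c): the join and
 * source-filter entry points above -- ip_mc_join_group() and ip_mc_source()
 * -- are normally reached from userspace through the IP_ADD_MEMBERSHIP and
 * IP_ADD_SOURCE_MEMBERSHIP socket options.  The sketch below is a minimal
 * userspace example under that assumption; the group and source addresses
 * are made up, and error handling is abbreviated.
 */
#if 0	/* example only; never built as part of the kernel */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ip_mreq mreq;		/* any-source join */
	struct ip_mreq_source mreqs;	/* source-specific join */

	if (fd < 0)
		return 1;

	/* join 239.1.1.1 on any interface; the kernel side of this
	 * setsockopt() ends up in ip_mc_join_group() above.
	 */
	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_MEMBERSHIP");

	/* source-specific join of 239.2.2.2 limited to source 192.0.2.1;
	 * this path goes through ip_mc_source()/ip_mc_add_src() above.
	 */
	memset(&mreqs, 0, sizeof(mreqs));
	mreqs.imr_multiaddr.s_addr = inet_addr("239.2.2.2");
	mreqs.imr_sourceaddr.s_addr = inet_addr("192.0.2.1");
	mreqs.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
		       &mreqs, sizeof(mreqs)) < 0)
		perror("IP_ADD_SOURCE_MEMBERSHIP");

	close(fd);
	return 0;
}
#endif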